Diffstat (limited to 'crypto')
202 files changed, 27968 insertions, 19125 deletions
diff --git a/crypto/842.c b/crypto/842.c
index e59e54d76960..4007e87bed80 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -18,17 +18,12 @@
  * drivers/crypto/nx/nx-842-crypto.c
  */
 
+#include <crypto/internal/scompress.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <linux/sw842.h>
-#include <crypto/internal/scompress.h>
-
-struct crypto842_ctx {
-	void *wmem;	/* working memory for compress */
-};
 
-static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+static void *crypto842_alloc_ctx(void)
 {
 	void *ctx;
 
@@ -39,38 +34,11 @@ static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
 	return ctx;
 }
 
-static int crypto842_init(struct crypto_tfm *tfm)
-{
-	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	ctx->wmem = crypto842_alloc_ctx(NULL);
-	if (IS_ERR(ctx->wmem))
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void crypto842_free_ctx(void *ctx)
 {
 	kfree(ctx);
 }
 
-static void crypto842_exit(struct crypto_tfm *tfm)
-{
-	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto842_free_ctx(NULL, ctx->wmem);
-}
-
-static int crypto842_compress(struct crypto_tfm *tfm,
-			      const u8 *src, unsigned int slen,
-			      u8 *dst, unsigned int *dlen)
-{
-	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	return sw842_compress(src, slen, dst, dlen, ctx->wmem);
-}
-
 static int crypto842_scompress(struct crypto_scomp *tfm,
 			       const u8 *src, unsigned int slen,
 			       u8 *dst, unsigned int *dlen, void *ctx)
@@ -78,13 +46,6 @@ static int crypto842_scompress(struct crypto_scomp *tfm,
 	return sw842_compress(src, slen, dst, dlen, ctx);
 }
 
-static int crypto842_decompress(struct crypto_tfm *tfm,
-				const u8 *src, unsigned int slen,
-				u8 *dst, unsigned int *dlen)
-{
-	return sw842_decompress(src, slen, dst, dlen);
-}
-
 static int crypto842_sdecompress(struct crypto_scomp *tfm,
 				 const u8 *src, unsigned int slen,
 				 u8 *dst, unsigned int *dlen, void *ctx)
@@ -92,23 +53,11 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm,
 	return sw842_decompress(src, slen, dst, dlen);
 }
 
-static struct crypto_alg alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-generic",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct crypto842_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= crypto842_init,
-	.cra_exit		= crypto842_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= crypto842_compress,
-	.coa_decompress		= crypto842_decompress } }
-};
-
 static struct scomp_alg scomp = {
-	.alloc_ctx		= crypto842_alloc_ctx,
-	.free_ctx		= crypto842_free_ctx,
+	.streams		= {
+		.alloc_ctx	= crypto842_alloc_ctx,
+		.free_ctx	= crypto842_free_ctx,
+	},
 	.compress		= crypto842_scompress,
 	.decompress		= crypto842_sdecompress,
 	.base			= {
@@ -121,25 +70,12 @@ static struct scomp_alg scomp = {
 
 static int __init crypto842_mod_init(void)
 {
-	int ret;
-
-	ret = crypto_register_alg(&alg);
-	if (ret)
-		return ret;
-
-	ret = crypto_register_scomp(&scomp);
-	if (ret) {
-		crypto_unregister_alg(&alg);
-		return ret;
-	}
-
-	return ret;
+	return crypto_register_scomp(&scomp);
}
-subsys_initcall(crypto842_mod_init);
+module_init(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
-	crypto_unregister_alg(&alg);
 	crypto_unregister_scomp(&scomp);
 }
 module_exit(crypto842_mod_exit);
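The 842.c change above is the general pattern for converting a software compressor to the scomp-only API: the legacy crypto_alg registration and the private wmem wrapper disappear, and per-stream working memory moves under the new .streams hooks, which the scomp core manages itself. Condensed into a free-standing sketch — the mycomp names, MY_WMEM_SIZE, and the my_compress()/my_decompress() backends are hypothetical stand-ins for sw842_compress()/sw842_decompress():

	#include <crypto/internal/scompress.h>
	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	#define MY_WMEM_SIZE	4096	/* hypothetical per-stream scratch size */

	/* Hypothetical backends standing in for the sw842 library calls. */
	static int my_compress(const u8 *src, unsigned int slen,
			       u8 *dst, unsigned int *dlen, void *wmem);
	static int my_decompress(const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen);

	/* Per-stream working memory, allocated/freed by the scomp core. */
	static void *mycomp_alloc_ctx(void)
	{
		void *ctx = kmalloc(MY_WMEM_SIZE, GFP_KERNEL);

		return ctx ? ctx : ERR_PTR(-ENOMEM);
	}

	static void mycomp_free_ctx(void *ctx)
	{
		kfree(ctx);
	}

	static int mycomp_scompress(struct crypto_scomp *tfm,
				    const u8 *src, unsigned int slen,
				    u8 *dst, unsigned int *dlen, void *ctx)
	{
		return my_compress(src, slen, dst, dlen, ctx);
	}

	static int mycomp_sdecompress(struct crypto_scomp *tfm,
				      const u8 *src, unsigned int slen,
				      u8 *dst, unsigned int *dlen, void *ctx)
	{
		return my_decompress(src, slen, dst, dlen);
	}

	static struct scomp_alg mycomp = {
		.streams	= {
			.alloc_ctx	= mycomp_alloc_ctx,
			.free_ctx	= mycomp_free_ctx,
		},
		.compress	= mycomp_scompress,
		.decompress	= mycomp_sdecompress,
		.base		= {
			.cra_name	= "mycomp",
			.cra_module	= THIS_MODULE,
		},
	};

	static int __init mycomp_mod_init(void)
	{
		/* Single registration, as in the converted 842.c. */
		return crypto_register_scomp(&mycomp);
	}
	module_init(mycomp_mod_init);

Because the core now owns the scratch buffers via .streams, the old cra_init/cra_exit callbacks and the crypto842_ctx wrapper could be deleted outright.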
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ca3b02dcbbfa..2e5b195b1b06 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -15,16 +15,17 @@ source "crypto/async_tx/Kconfig"
 #
 menuconfig CRYPTO
 	tristate "Cryptographic API"
+	select CRYPTO_LIB_UTILS
 	help
 	  This option provides the core Cryptographic API.
 
 if CRYPTO
 
-comment "Crypto core or helper"
+menu "Crypto core or helper"
 
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
-	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+	depends on CRYPTO_DRBG && CRYPTO_SELFTESTS
 	depends on (MODULE_SIG || !MODULES)
 	help
 	  This option enables the fips boot option which is
@@ -32,6 +33,27 @@ config CRYPTO_FIPS
 	  certification. You should say no unless you know what
 	  this is.
 
+config CRYPTO_FIPS_NAME
+	string "FIPS Module Name"
+	default "Linux Kernel Cryptographic API"
+	depends on CRYPTO_FIPS
+	help
+	  This option sets the FIPS Module name reported by the Crypto API via
+	  the /proc/sys/crypto/fips_name file.
+
+config CRYPTO_FIPS_CUSTOM_VERSION
+	bool "Use Custom FIPS Module Version"
+	depends on CRYPTO_FIPS
+	default n
+
+config CRYPTO_FIPS_VERSION
+	string "FIPS Module Version"
+	default "(none)"
+	depends on CRYPTO_FIPS_CUSTOM_VERSION
+	help
+	  This option provides the ability to override the FIPS Module Version.
+	  By default the KERNELRELEASE value is used.
+
 config CRYPTO_ALGAPI
 	tristate
 	select CRYPTO_ALGAPI2
@@ -49,18 +71,25 @@ config CRYPTO_AEAD
 config CRYPTO_AEAD2
 	tristate
 	select CRYPTO_ALGAPI2
-	select CRYPTO_NULL2
-	select CRYPTO_RNG2
+
+config CRYPTO_SIG
+	tristate
+	select CRYPTO_SIG2
+	select CRYPTO_ALGAPI
+
+config CRYPTO_SIG2
+	tristate
+	select CRYPTO_ALGAPI2
 
 config CRYPTO_SKCIPHER
 	tristate
 	select CRYPTO_SKCIPHER2
 	select CRYPTO_ALGAPI
+	select CRYPTO_ECB
 
 config CRYPTO_SKCIPHER2
 	tristate
 	select CRYPTO_ALGAPI2
-	select CRYPTO_RNG2
 
 config CRYPTO_HASH
 	tristate
@@ -112,21 +141,30 @@ config CRYPTO_ACOMP
 	select CRYPTO_ALGAPI
 	select CRYPTO_ACOMP2
 
+config CRYPTO_HKDF
+	tristate
+	select CRYPTO_SHA256 if CRYPTO_SELFTESTS
+	select CRYPTO_SHA512 if CRYPTO_SELFTESTS
+	select CRYPTO_HASH2
+
 config CRYPTO_MANAGER
-	tristate "Cryptographic algorithm manager"
+	tristate
+	default CRYPTO_ALGAPI if CRYPTO_SELFTESTS
 	select CRYPTO_MANAGER2
 	help
-	  Create default cryptographic template instantiations such as
-	  cbc(aes).
+	  This provides the support for instantiating templates such as
+	  cbc(aes), and the support for the crypto self-tests.
 
 config CRYPTO_MANAGER2
 	def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
+	select CRYPTO_ACOMP2
 	select CRYPTO_AEAD2
-	select CRYPTO_HASH2
-	select CRYPTO_SKCIPHER2
 	select CRYPTO_AKCIPHER2
+	select CRYPTO_SIG2
+	select CRYPTO_HASH2
 	select CRYPTO_KPP2
-	select CRYPTO_ACOMP2
+	select CRYPTO_RNG2
+	select CRYPTO_SKCIPHER2
 
 config CRYPTO_USER
 	tristate "Userspace cryptographic algorithm configuration"
@@ -136,38 +174,44 @@ config CRYPTO_USER
 	  Userspace configuration for cryptographic instantiations such as
 	  cbc(aes).
 
-config CRYPTO_MANAGER_DISABLE_TESTS
-	bool "Disable run-time self tests"
-	default y
+config CRYPTO_SELFTESTS
+	bool "Enable cryptographic self-tests"
+	depends on EXPERT
 	help
-	  Disable run-time self tests that normally take place at
-	  algorithm registration.
+	  Enable the cryptographic self-tests.
+
+	  The cryptographic self-tests run at boot time, or at algorithm
+	  registration time if algorithms are dynamically loaded later.
+
+	  There are two main use cases for these tests:
+
+	  - Development and pre-release testing.  In this case, also enable
+	    CRYPTO_SELFTESTS_FULL to get the full set of tests.  All crypto
+	    code in the kernel is expected to pass the full set of tests.
+
+	  - Production kernels, to help prevent buggy drivers from being used
+	    and/or meet FIPS 140-3 pre-operational testing requirements.
+	    In this case, enable CRYPTO_SELFTESTS but not
+	    CRYPTO_SELFTESTS_FULL.
 
-config CRYPTO_MANAGER_EXTRA_TESTS
-	bool "Enable extra run-time crypto self tests"
-	depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
+config CRYPTO_SELFTESTS_FULL
+	bool "Enable the full set of cryptographic self-tests"
+	depends on CRYPTO_SELFTESTS
 	help
-	  Enable extra run-time self tests of registered crypto algorithms,
-	  including randomized fuzz tests.
+	  Enable the full set of cryptographic self-tests for each algorithm.
 
-	  This is intended for developer use only, as these tests take much
-	  longer to run than the normal self tests.
+	  The full set of tests should be enabled for development and
+	  pre-release testing, but not in production kernels.
 
-config CRYPTO_GF128MUL
-	tristate
+	  All crypto code in the kernel is expected to pass the full tests.
 
 config CRYPTO_NULL
 	tristate "Null algorithms"
-	select CRYPTO_NULL2
+	select CRYPTO_ALGAPI
+	select CRYPTO_SKCIPHER
+	select CRYPTO_HASH
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
 
-config CRYPTO_NULL2
-	tristate
-	select CRYPTO_ALGAPI2
-	select CRYPTO_SKCIPHER2
-	select CRYPTO_HASH2
-
 config CRYPTO_PCRYPT
 	tristate "Parallel crypto engine"
 	depends on SMP
@@ -194,17 +238,32 @@ config CRYPTO_AUTHENC
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	select CRYPTO_HASH
-	select CRYPTO_NULL
 	help
 	  Authenc: Combined mode wrapper for IPsec.
-	  This is required for IPSec.
 
-config CRYPTO_TEST
-	tristate "Testing module"
+	  This is required for IPSec ESP (XFRM_ESP).
+
+config CRYPTO_KRB5ENC
+	tristate "Kerberos 5 combined hash+cipher support"
+	select CRYPTO_AEAD
+	select CRYPTO_SKCIPHER
+	select CRYPTO_MANAGER
+	select CRYPTO_HASH
+	help
+	  Combined hash and cipher support for Kerberos 5 RFC3961 simplified
+	  profile.  This is required for Kerberos 5-style encryption, used by
+	  sunrpc/NFS and rxrpc/AFS.
+
+config CRYPTO_BENCHMARK
+	tristate "Crypto benchmarking module"
 	depends on m || EXPERT
 	select CRYPTO_MANAGER
 	help
-	  Quick & dirty crypto test module.
+	  Quick & dirty crypto benchmarking module.
+
+	  This is mainly intended for use by people developing cryptographic
+	  algorithms in the kernel.  It should not be enabled in production
+	  kernels.
 
 config CRYPTO_SIMD
 	tristate
@@ -213,314 +272,586 @@ config CRYPTO_SIMD
 config CRYPTO_ENGINE
 	tristate
 
-comment "Public-key cryptography"
+endmenu
+
+menu "Public-key cryptography"
 
 config CRYPTO_RSA
-	tristate "RSA algorithm"
+	tristate "RSA (Rivest-Shamir-Adleman)"
 	select CRYPTO_AKCIPHER
 	select CRYPTO_MANAGER
+	select CRYPTO_SIG
 	select MPILIB
 	select ASN1
 	help
-	  Generic implementation of the RSA public key algorithm.
+	  RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017)
 
 config CRYPTO_DH
-	tristate "Diffie-Hellman algorithm"
+	tristate "DH (Diffie-Hellman)"
 	select CRYPTO_KPP
 	select MPILIB
 	help
-	  Generic implementation of the Diffie-Hellman algorithm.
+	  DH (Diffie-Hellman) key exchange algorithm
+
+config CRYPTO_DH_RFC7919_GROUPS
+	bool "RFC 7919 FFDHE groups"
+	depends on CRYPTO_DH
+	select CRYPTO_RNG_DEFAULT
+	help
+	  FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups
+	  defined in RFC7919.
+
+	  Support these finite-field groups in DH key exchanges:
+	  - ffdhe2048, ffdhe3072, ffdhe4096, ffdhe6144, ffdhe8192
+
+	  If unsure, say N.
 config CRYPTO_ECC
 	tristate
+	select CRYPTO_RNG_DEFAULT
 
 config CRYPTO_ECDH
-	tristate "ECDH algorithm"
+	tristate "ECDH (Elliptic Curve Diffie-Hellman)"
 	select CRYPTO_ECC
 	select CRYPTO_KPP
-	select CRYPTO_RNG_DEFAULT
 	help
-	  Generic implementation of the ECDH algorithm
+	  ECDH (Elliptic Curve Diffie-Hellman) key exchange algorithm
+	  using curves P-192, P-256, and P-384 (FIPS 186)
 
 config CRYPTO_ECDSA
-	tristate "ECDSA (NIST P192, P256 etc.) algorithm"
+	tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)"
 	select CRYPTO_ECC
-	select CRYPTO_AKCIPHER
+	select CRYPTO_SIG
 	select ASN1
 	help
-	  Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.)
-	  is A NIST cryptographic standard algorithm. Only signature verification
-	  is implemented.
+	  ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186,
+	  ISO/IEC 14888-3)
+	  using curves P-192, P-256, P-384 and P-521
+
+	  Only signature verification is implemented.
 
 config CRYPTO_ECRDSA
-	tristate "EC-RDSA (GOST 34.10) algorithm"
+	tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)"
 	select CRYPTO_ECC
-	select CRYPTO_AKCIPHER
+	select CRYPTO_SIG
 	select CRYPTO_STREEBOG
 	select OID_REGISTRY
 	select ASN1
 	help
 	  Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
-	  RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
-	  standard algorithms (called GOST algorithms). Only signature verification
-	  is implemented.
+	  RFC 7091, ISO/IEC 14888-3)
 
-config CRYPTO_SM2
-	tristate "SM2 algorithm"
-	select CRYPTO_SM3
-	select CRYPTO_AKCIPHER
-	select CRYPTO_MANAGER
-	select MPILIB
-	select ASN1
+	  One of the Russian cryptographic standard algorithms (called GOST
+	  algorithms).  Only signature verification is implemented.
+
+endmenu
+
+menu "Block ciphers"
+
+config CRYPTO_AES
+	tristate "AES (Advanced Encryption Standard)"
+	select CRYPTO_ALGAPI
+	select CRYPTO_LIB_AES
 	help
-	  Generic implementation of the SM2 public key algorithm. It was
-	  published by State Encryption Management Bureau, China.
-	  as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
+	  AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3)
 
-	  References:
-	  https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
-	  http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml
-	  http://www.gmbz.org.cn/main/bzlb.html
+	  Rijndael appears to be consistently a very good performer in
+	  both hardware and software across a wide range of computing
+	  environments regardless of its use in feedback or non-feedback
+	  modes. Its key setup time is excellent, and its key agility is
+	  good. Rijndael's very low memory requirements make it very well
+	  suited for restricted-space environments, in which it also
+	  demonstrates excellent performance. Rijndael's operations are
+	  among the easiest to defend against power and timing attacks.
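As an aside on how the key-exchange entries above are consumed: kernel code reaches ECDH through the KPP API by algorithm name. A minimal, illustrative sketch, assuming a P-256 provider is built in — error handling is mostly elided, buffer sizes are illustrative, and an empty struct ecdh asks the kernel to generate the private key:

	#include <crypto/ecdh.h>
	#include <crypto/kpp.h>
	#include <linux/scatterlist.h>

	static int ecdh_demo(void)
	{
		struct crypto_kpp *tfm;
		struct kpp_request *req;
		struct ecdh params = { };	/* no key: kernel generates one */
		struct scatterlist dst;
		u8 pubkey[64];			/* x || y for P-256 */
		char secret[64];		/* packed secret, illustrative size */
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Pack the (empty) parameters and install them. */
		err = crypto_ecdh_encode_key(secret, crypto_ecdh_key_len(&params),
					     &params);
		err = err ?: crypto_kpp_set_secret(tfm, secret,
						   crypto_ecdh_key_len(&params));

		req = kpp_request_alloc(tfm, GFP_KERNEL);
		sg_init_one(&dst, pubkey, sizeof(pubkey));
		kpp_request_set_input(req, NULL, 0);
		kpp_request_set_output(req, &dst, sizeof(pubkey));
		kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					 crypto_req_done, &wait);
		err = err ?: crypto_wait_req(crypto_kpp_generate_public_key(req),
					     &wait);

		kpp_request_free(req);
		crypto_free_kpp(tfm);
		return err;
	}

A second request with crypto_kpp_compute_shared_secret() against the peer's public key would then complete the exchange.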
-config CRYPTO_CURVE25519
-	tristate "Curve25519 algorithm"
-	select CRYPTO_KPP
-	select CRYPTO_LIB_CURVE25519_GENERIC
+	  The AES specifies three key sizes: 128, 192 and 256 bits
 
-config CRYPTO_CURVE25519_X86
-	tristate "x86_64 accelerated Curve25519 scalar multiplication library"
-	depends on X86 && 64BIT
-	select CRYPTO_LIB_CURVE25519_GENERIC
-	select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+config CRYPTO_AES_TI
+	tristate "AES (Advanced Encryption Standard) (fixed time)"
+	select CRYPTO_ALGAPI
+	select CRYPTO_LIB_AES
+	help
+	  AES cipher algorithms (Rijndael)(FIPS-197, ISO/IEC 18033-3)
 
-comment "Authenticated Encryption with Associated Data"
+	  This is a generic implementation of AES that attempts to eliminate
+	  data dependent latencies as much as possible without affecting
+	  performance too much. It is intended for use by the generic CCM
+	  and GCM drivers, and other CTR or CMAC/XCBC based modes that rely
+	  solely on encryption (although decryption is supported as well, but
+	  with a more dramatic performance hit)
 
-config CRYPTO_CCM
-	tristate "CCM support"
-	select CRYPTO_CTR
-	select CRYPTO_HASH
-	select CRYPTO_AEAD
-	select CRYPTO_MANAGER
+	  Instead of using 16 lookup tables of 1 KB each, (8 for encryption and
+	  8 for decryption), this implementation only uses just two S-boxes of
+	  256 bytes each, and attempts to eliminate data dependent latencies by
+	  prefetching the entire table into the cache at the start of each
+	  block. Interrupts are also disabled to avoid races where cachelines
+	  are evicted when the CPU is interrupted to do something else.
+
+config CRYPTO_ANUBIS
+	tristate "Anubis"
+	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+	select CRYPTO_ALGAPI
 	help
-	  Support for Counter with CBC MAC. Required for IPsec.
+	  Anubis cipher algorithm
 
-config CRYPTO_GCM
-	tristate "GCM/GMAC support"
-	select CRYPTO_CTR
-	select CRYPTO_AEAD
-	select CRYPTO_GHASH
-	select CRYPTO_NULL
-	select CRYPTO_MANAGER
+	  Anubis is a variable key length cipher which can use keys from
+	  128 bits to 320 bits in length.  It was evaluated as an entrant
+	  in the NESSIE competition.
+
+	  See https://web.archive.org/web/20160606112246/http://www.larc.usp.br/~pbarreto/AnubisPage.html
+	  for further information.
+
+config CRYPTO_ARIA
+	tristate "ARIA"
+	select CRYPTO_ALGAPI
 	help
-	  Support for Galois/Counter Mode (GCM) and Galois Message
-	  Authentication Code (GMAC). Required for IPSec.
+	  ARIA cipher algorithm (RFC5794)
 
-config CRYPTO_CHACHA20POLY1305
-	tristate "ChaCha20-Poly1305 AEAD support"
-	select CRYPTO_CHACHA20
-	select CRYPTO_POLY1305
-	select CRYPTO_AEAD
-	select CRYPTO_MANAGER
+	  ARIA is a standard encryption algorithm of the Republic of Korea.
+	  The ARIA specifies three key sizes and rounds.
+	  128-bit: 12 rounds.
+	  192-bit: 14 rounds.
+	  256-bit: 16 rounds.
+
+	  See:
+	  https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do
+
+config CRYPTO_BLOWFISH
+	tristate "Blowfish"
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLOWFISH_COMMON
 	help
-	  ChaCha20-Poly1305 AEAD support, RFC7539.
+	  Blowfish cipher algorithm, by Bruce Schneier
 
-	  Support for the AEAD wrapper using the ChaCha20 stream cipher combined
-	  with the Poly1305 authenticator. It is defined in RFC7539 for use in
-	  IETF protocols.
+	  This is a variable key length cipher which can use keys from 32
+	  bits to 448 bits in length.  It's fast, simple and specifically
+	  designed for use on "large microprocessors".
-config CRYPTO_AEGIS128
-	tristate "AEGIS-128 AEAD algorithm"
-	select CRYPTO_AEAD
-	select CRYPTO_AES  # for AES S-box tables
+	  See https://www.schneier.com/blowfish.html for further information.
+
+config CRYPTO_BLOWFISH_COMMON
+	tristate
 	help
-	  Support for the AEGIS-128 dedicated AEAD algorithm.
+	  Common parts of the Blowfish cipher algorithm shared by the
+	  generic C and the assembler implementations.
 
-config CRYPTO_AEGIS128_SIMD
-	bool "Support SIMD acceleration for AEGIS-128"
-	depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
-	default y
+config CRYPTO_CAMELLIA
+	tristate "Camellia"
+	select CRYPTO_ALGAPI
+	help
+	  Camellia cipher algorithms (ISO/IEC 18033-3)
 
-config CRYPTO_AEGIS128_AESNI_SSE2
-	tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
-	depends on X86 && 64BIT
-	select CRYPTO_AEAD
-	select CRYPTO_SIMD
+	  Camellia is a symmetric key block cipher developed jointly
+	  at NTT and Mitsubishi Electric Corporation.
+
+	  The Camellia specifies three key sizes: 128, 192 and 256 bits.
+
+	  See https://info.isl.ntt.co.jp/crypt/eng/camellia/ for further information.
+
+config CRYPTO_CAST_COMMON
+	tristate
 	help
-	  AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
+	  Common parts of the CAST cipher algorithms shared by the
+	  generic C and the assembler implementations.
 
-config CRYPTO_SEQIV
-	tristate "Sequence Number IV Generator"
-	select CRYPTO_AEAD
+config CRYPTO_CAST5
+	tristate "CAST5 (CAST-128)"
+	select CRYPTO_ALGAPI
+	select CRYPTO_CAST_COMMON
+	help
+	  CAST5 (CAST-128) cipher algorithm (RFC2144, ISO/IEC 18033-3)
+
+config CRYPTO_CAST6
+	tristate "CAST6 (CAST-256)"
+	select CRYPTO_ALGAPI
+	select CRYPTO_CAST_COMMON
+	help
+	  CAST6 (CAST-256) encryption algorithm (RFC2612)
+
+config CRYPTO_DES
+	tristate "DES and Triple DES EDE"
+	select CRYPTO_ALGAPI
+	select CRYPTO_LIB_DES
+	help
+	  DES (Data Encryption Standard)(FIPS 46-2, ISO/IEC 18033-3) and
+	  Triple DES EDE (Encrypt/Decrypt/Encrypt) (FIPS 46-3, ISO/IEC 18033-3)
+	  cipher algorithms
+
+config CRYPTO_FCRYPT
+	tristate "FCrypt"
+	select CRYPTO_ALGAPI
 	select CRYPTO_SKCIPHER
-	select CRYPTO_NULL
-	select CRYPTO_RNG_DEFAULT
-	select CRYPTO_MANAGER
 	help
-	  This IV generator generates an IV based on a sequence number by
-	  xoring it with a salt. This algorithm is mainly useful for CTR
+	  FCrypt algorithm used by RxRPC
 
-config CRYPTO_ECHAINIV
-	tristate "Encrypted Chain IV Generator"
-	select CRYPTO_AEAD
-	select CRYPTO_NULL
-	select CRYPTO_RNG_DEFAULT
+	  See https://ota.polyonymo.us/fcrypt-paper.txt
+
+config CRYPTO_KHAZAD
+	tristate "Khazad"
+	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+	select CRYPTO_ALGAPI
+	help
+	  Khazad cipher algorithm
+
+	  Khazad was a finalist in the initial NESSIE competition.  It is
+	  an algorithm optimized for 64-bit processors with good performance
+	  on 32-bit processors.  Khazad uses a 128-bit key size.
+
+	  See https://web.archive.org/web/20171011071731/http://www.larc.usp.br/~pbarreto/KhazadPage.html
+	  for further information.
+
+config CRYPTO_SEED
+	tristate "SEED"
+	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+	select CRYPTO_ALGAPI
+	help
+	  SEED cipher algorithm (RFC4269, ISO/IEC 18033-3)
+
+	  SEED is a 128-bit symmetric key block cipher that has been
+	  developed by KISA (Korea Information Security Agency) as a
+	  national standard encryption algorithm of the Republic of Korea.
+	  It is a 16 round block cipher with the key size of 128 bit.
+
+	  See https://seed.kisa.or.kr/kisa/algorithm/EgovSeedInfo.do
+	  for further information.
+
+config CRYPTO_SERPENT
+	tristate "Serpent"
+	select CRYPTO_ALGAPI
+	help
+	  Serpent cipher algorithm, by Anderson, Biham & Knudsen
+
+	  Keys are allowed to be from 0 to 256 bits in length, in steps
+	  of 8 bits.
+
+	  See https://www.cl.cam.ac.uk/~rja14/serpent.html for further information.
+
+config CRYPTO_SM4
+	tristate
+
+config CRYPTO_SM4_GENERIC
+	tristate "SM4 (ShangMi 4)"
+	select CRYPTO_ALGAPI
+	select CRYPTO_SM4
+	help
+	  SM4 cipher algorithms (OSCCA GB/T 32907-2016,
+	  ISO/IEC 18033-3:2010/Amd 1:2021)
+
+	  SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+	  Organization of State Commercial Administration of China (OSCCA)
+	  as an authorized cryptographic algorithms for the use within China.
+
+	  SMS4 was originally created for use in protecting wireless
+	  networks, and is mandated in the Chinese National Standard for
+	  Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure)
+	  (GB.15629.11-2003).
+
+	  The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
+	  standardized through TC 260 of the Standardization Administration
+	  of the People's Republic of China (SAC).
+
+	  The input, output, and key of SMS4 are each 128 bits.
+
+	  See https://eprint.iacr.org/2008/329.pdf for further information.
+
+	  If unsure, say N.
+
+config CRYPTO_TEA
+	tristate "TEA, XTEA and XETA"
+	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+	select CRYPTO_ALGAPI
+	help
+	  TEA (Tiny Encryption Algorithm) cipher algorithms
+
+	  Tiny Encryption Algorithm is a simple cipher that uses
+	  many rounds for security.  It is very fast and uses
+	  little memory.
+
+	  Extended Tiny Encryption Algorithm is a modification to
+	  the TEA algorithm to address a potential key weakness
+	  in the TEA algorithm.
+
+	  Extended Encryption Tiny Algorithm is a mis-implementation
+	  of the XTEA algorithm for compatibility purposes.
+
+config CRYPTO_TWOFISH
+	tristate "Twofish"
+	select CRYPTO_ALGAPI
+	select CRYPTO_TWOFISH_COMMON
+	help
+	  Twofish cipher algorithm
+
+	  Twofish was submitted as an AES (Advanced Encryption Standard)
+	  candidate cipher by researchers at CounterPane Systems.  It is a
+	  16 round block cipher supporting key sizes of 128, 192, and 256
+	  bits.
+
+	  See https://www.schneier.com/twofish.html for further information.
+
+config CRYPTO_TWOFISH_COMMON
+	tristate
+	help
+	  Common parts of the Twofish cipher algorithm shared by the
+	  generic C and the assembler implementations.
+
+endmenu
+
+menu "Length-preserving ciphers and modes"
+
+config CRYPTO_ADIANTUM
+	tristate "Adiantum"
+	select CRYPTO_CHACHA20
+	select CRYPTO_LIB_POLY1305
+	select CRYPTO_LIB_POLY1305_GENERIC
+	select CRYPTO_NHPOLY1305
 	select CRYPTO_MANAGER
 	help
-	  This IV generator generates an IV based on the encryption of
-	  a sequence number xored with a salt.  This is the default
-	  algorithm for CBC.
+	  Adiantum tweakable, length-preserving encryption mode
 
-comment "Block modes"
+	  Designed for fast and secure disk encryption, especially on
+	  CPUs without dedicated crypto instructions.  It encrypts
+	  each sector using the XChaCha12 stream cipher, two passes of
+	  an ε-almost-∆-universal hash function, and an invocation of
+	  the AES-256 block cipher on a single 16-byte block.  On CPUs
+	  without AES instructions, Adiantum is much faster than
+	  AES-XTS.
 
-config CRYPTO_CBC
-	tristate "CBC support"
+	  Adiantum's security is provably reducible to that of its
+	  underlying stream and block ciphers, subject to a security
+	  bound.
+	  Unlike XTS, Adiantum is a true wide-block encryption
+	  mode, so it actually provides an even stronger notion of
+	  security than XTS, subject to the security bound.
+
+	  If unsure, say N.
+
+config CRYPTO_ARC4
+	tristate "ARC4 (Alleged Rivest Cipher 4)"
+	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+	select CRYPTO_SKCIPHER
+	select CRYPTO_LIB_ARC4
+	help
+	  ARC4 cipher algorithm
+
+	  ARC4 is a stream cipher using keys ranging from 8 bits to 2048
+	  bits in length.  This algorithm is required for driver-based
+	  WEP, but it should not be for other purposes because of the
+	  weakness of the algorithm.
+
+config CRYPTO_CHACHA20
+	tristate "ChaCha"
+	select CRYPTO_LIB_CHACHA
 	select CRYPTO_SKCIPHER
-	select CRYPTO_MANAGER
 	help
-	  CBC: Cipher Block Chaining mode
-	  This block cipher algorithm is required for IPSec.
+	  The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
+
+	  ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
+	  Bernstein and further specified in RFC7539 for use in IETF protocols.
+	  This is the portable C implementation of ChaCha20.  See
+	  https://cr.yp.to/chacha/chacha-20080128.pdf for further information.
+
+	  XChaCha20 is the application of the XSalsa20 construction to ChaCha20
+	  rather than to Salsa20.  XChaCha20 extends ChaCha20's nonce length
+	  from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits,
+	  while provably retaining ChaCha20's security.  See
+	  https://cr.yp.to/snuffle/xsalsa-20081128.pdf for further information.
 
-config CRYPTO_CFB
-	tristate "CFB support"
+	  XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly
+	  reduced security margin but increased performance.  It can be needed
+	  in some performance-sensitive scenarios.
+
+config CRYPTO_CBC
+	tristate "CBC (Cipher Block Chaining)"
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	help
-	  CFB: Cipher FeedBack mode
-	  This block cipher algorithm is required for TPM2 Cryptography.
+	  CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+
+	  This block cipher mode is required for IPSec ESP (XFRM_ESP).
 
 config CRYPTO_CTR
-	tristate "CTR support"
+	tristate "CTR (Counter)"
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	help
-	  CTR: Counter mode
-	  This block cipher algorithm is required for IPSec.
+	  CTR (Counter) mode (NIST SP800-38A)
 
 config CRYPTO_CTS
-	tristate "CTS support"
+	tristate "CTS (Cipher Text Stealing)"
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	help
-	  CTS: Cipher Text Stealing
-	  This is the Cipher Text Stealing mode as described by
-	  Section 8 of rfc2040 and referenced by rfc3962
-	  (rfc3962 includes errata information in its Appendix A) or
-	  CBC-CS3 as defined by NIST in Sp800-38A addendum from Oct 2010.
+	  CBC-CS3 variant of CTS (Cipher Text Stealing) (NIST
+	  Addendum to SP800-38A (October 2010))
+
 	  This mode is required for Kerberos gss mechanism support
 	  for AES encryption.
 
-	  See: https://csrc.nist.gov/publications/detail/sp/800-38a/addendum/final
-
 config CRYPTO_ECB
-	tristate "ECB support"
-	select CRYPTO_SKCIPHER
+	tristate "ECB (Electronic Codebook)"
+	select CRYPTO_SKCIPHER2
+	select CRYPTO_MANAGER
+	help
+	  ECB (Electronic Codebook) mode (NIST SP800-38A)
+
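For orientation, the mode templates in this menu are instantiated by "template(cipher)" name through the kernel's skcipher API. A minimal, illustrative "cbc(aes)" sketch — synchronous wait, simplified buffer handling, error checking mostly elided:

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	static int cbc_aes_demo(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);	/* template(cipher) */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, 16);	/* AES-128 */

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		sg_init_one(&sg, buf, len);	/* len must be a multiple of 16 for CBC */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);	/* in place */
		err = err ?: crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
		crypto_free_skcipher(tfm);
		return err;
	}

Swapping the name for "xts(aes)" or "adiantum(xchacha12,aes)" selects the other length-preserving modes in this menu, provided the corresponding options are enabled.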
+config CRYPTO_HCTR2
+	tristate "HCTR2"
+	select CRYPTO_XCTR
+	select CRYPTO_LIB_POLYVAL
 	select CRYPTO_MANAGER
 	help
-	  ECB: Electronic CodeBook mode
-	  This is the simplest block cipher algorithm.  It simply encrypts
-	  the input block by block.
+	  HCTR2 length-preserving encryption mode
+
+	  A mode for storage encryption that is efficient on processors with
+	  instructions to accelerate AES and carryless multiplication, e.g.
+	  x86 processors with AES-NI and CLMUL, and ARM processors with the
+	  ARMv8 crypto extensions.
+
+	  See https://eprint.iacr.org/2021/1441
 
 config CRYPTO_LRW
-	tristate "LRW support"
+	tristate "LRW (Liskov Rivest Wagner)"
+	select CRYPTO_LIB_GF128MUL
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
-	select CRYPTO_GF128MUL
+	select CRYPTO_ECB
 	help
-	  LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable
+	  LRW (Liskov Rivest Wagner) mode
+
+	  A tweakable, non malleable, non movable
 	  narrow block cipher mode for dm-crypt.  Use it with cipher
 	  specification string aes-lrw-benbi, the key must be 256, 320 or 384.
 	  The first 128, 192 or 256 bits in the key are used for AES and the
 	  rest is used to tie each cipher block to its logical position.
 
-config CRYPTO_OFB
-	tristate "OFB support"
+	  See https://people.csail.mit.edu/rivest/pubs/LRW02.pdf
+
+config CRYPTO_PCBC
+	tristate "PCBC (Propagating Cipher Block Chaining)"
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	help
-	  OFB: the Output Feedback mode makes a block cipher into a synchronous
-	  stream cipher. It generates keystream blocks, which are then XORed
-	  with the plaintext blocks to get the ciphertext. Flipping a bit in the
-	  ciphertext produces a flipped bit in the plaintext at the same
-	  location. This property allows many error correcting codes to function
-	  normally even when applied before encryption.
+	  PCBC (Propagating Cipher Block Chaining) mode
 
-config CRYPTO_PCBC
-	tristate "PCBC support"
+	  This block cipher mode is required for RxRPC.
+
+config CRYPTO_XCTR
+	tristate
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	help
-	  PCBC: Propagating Cipher Block Chaining mode
-	  This block cipher algorithm is required for RxRPC.
+	  XCTR (XOR Counter) mode for HCTR2
+
+	  This blockcipher mode is a variant of CTR mode using XORs and little-endian
+	  addition rather than big-endian arithmetic.
+
+	  XCTR mode is used to implement HCTR2.
 
 config CRYPTO_XTS
-	tristate "XTS support"
+	tristate "XTS (XOR Encrypt XOR with ciphertext stealing)"
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	select CRYPTO_ECB
 	help
-	  XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
-	  key size 256, 384 or 512 bits. This implementation currently
-	  can't handle a sectorsize which is not a multiple of 16 bytes.
+	  XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+	  and IEEE 1619)
 
-config CRYPTO_KEYWRAP
-	tristate "Key wrapping support"
-	select CRYPTO_SKCIPHER
-	select CRYPTO_MANAGER
-	help
-	  Support for key wrapping (NIST SP800-38F / RFC3394) without
-	  padding.
+	  Use with aes-xts-plain, key size 256, 384 or 512 bits.  This
+	  implementation currently can't handle a sectorsize which is not a
+	  multiple of 16 bytes.
 
 config CRYPTO_NHPOLY1305
 	tristate
 	select CRYPTO_HASH
+	select CRYPTO_LIB_POLY1305
 	select CRYPTO_LIB_POLY1305_GENERIC
 
-config CRYPTO_NHPOLY1305_SSE2
-	tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)"
-	depends on X86 && 64BIT
-	select CRYPTO_NHPOLY1305
+endmenu
+
+menu "AEAD (authenticated encryption with associated data) ciphers"
+
+config CRYPTO_AEGIS128
+	tristate "AEGIS-128"
+	select CRYPTO_AEAD
+	select CRYPTO_AES  # for AES S-box tables
 	help
-	  SSE2 optimized implementation of the hash function used by the
-	  Adiantum encryption mode.
+	  AEGIS-128 AEAD algorithm
 
-config CRYPTO_NHPOLY1305_AVX2
-	tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)"
-	depends on X86 && 64BIT
-	select CRYPTO_NHPOLY1305
+config CRYPTO_AEGIS128_SIMD
+	bool "AEGIS-128 (arm NEON, arm64 NEON)"
+	depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
+	default y
 	help
-	  AVX2 optimized implementation of the hash function used by the
-	  Adiantum encryption mode.
+	  AEGIS-128 AEAD algorithm
+
+	  Architecture: arm or arm64 using:
+	  - NEON (Advanced SIMD) extension
+
+config CRYPTO_CHACHA20POLY1305
+	tristate "ChaCha20-Poly1305"
 
-config CRYPTO_ADIANTUM
-	tristate "Adiantum support"
 	select CRYPTO_CHACHA20
-	select CRYPTO_LIB_POLY1305_GENERIC
-	select CRYPTO_NHPOLY1305
+	select CRYPTO_AEAD
+	select CRYPTO_LIB_POLY1305
 	select CRYPTO_MANAGER
 	help
-	  Adiantum is a tweakable, length-preserving encryption mode
-	  designed for fast and secure disk encryption, especially on
-	  CPUs without dedicated crypto instructions.  It encrypts
-	  each sector using the XChaCha12 stream cipher, two passes of
-	  an ε-almost-∆-universal hash function, and an invocation of
-	  the AES-256 block cipher on a single 16-byte block.  On CPUs
-	  without AES instructions, Adiantum is much faster than
-	  AES-XTS.
+	  ChaCha20 stream cipher and Poly1305 authenticator combined
+	  mode (RFC8439)
 
-	  Adiantum's security is provably reducible to that of its
-	  underlying stream and block ciphers, subject to a security
-	  bound.  Unlike XTS, Adiantum is a true wide-block encryption
-	  mode, so it actually provides an even stronger notion of
-	  security than XTS, subject to the security bound.
+config CRYPTO_CCM
+	tristate "CCM (Counter with Cipher Block Chaining-MAC)"
+	select CRYPTO_CTR
+	select CRYPTO_HASH
+	select CRYPTO_AEAD
+	select CRYPTO_MANAGER
+	help
+	  CCM (Counter with Cipher Block Chaining-Message Authentication Code)
+	  authenticated encryption mode (NIST SP800-38C)
 
-	  If unsure, say N.
+config CRYPTO_GCM
+	tristate "GCM (Galois/Counter Mode) and GMAC (GCM MAC)"
+	select CRYPTO_CTR
+	select CRYPTO_AEAD
+	select CRYPTO_GHASH
+	select CRYPTO_MANAGER
+	help
+	  GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
+	  (GCM Message Authentication Code) (NIST SP800-38D)
+
+	  This is required for IPSec ESP (XFRM_ESP).
+
+config CRYPTO_GENIV
+	tristate
+	select CRYPTO_AEAD
+	select CRYPTO_MANAGER
+	select CRYPTO_RNG_DEFAULT
+
+config CRYPTO_SEQIV
+	tristate "Sequence Number IV Generator"
+	select CRYPTO_GENIV
+	help
+	  Sequence Number IV generator
+
+	  This IV generator generates an IV based on a sequence number by
+	  xoring it with a salt.  This algorithm is mainly useful for CTR.
+
+	  This is required for IPsec ESP (XFRM_ESP).
+
+config CRYPTO_ECHAINIV
+	tristate "Encrypted Chain IV Generator"
+	select CRYPTO_GENIV
+	help
+	  Encrypted Chain IV generator
+
+	  This IV generator generates an IV based on the encryption of
+	  a sequence number xored with a salt.  This is the default
+	  algorithm for CBC.
 
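The AEAD entries above are likewise reached by template name. An illustrative "gcm(aes)" encryption sketch — in-place scatterlist, synchronous wait; the buffer layout, sizes, and error handling are simplifications of the example, not part of the Kconfig text:

	#include <crypto/aead.h>
	#include <linux/scatterlist.h>

	static int gcm_aes_demo(u8 *buf, unsigned int assoclen, unsigned int ptlen,
				const u8 *key, u8 *iv)
	{
		struct crypto_aead *tfm;
		struct aead_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_aead_setkey(tfm, key, 16);		/* AES-128 */
		err = err ?: crypto_aead_setauthsize(tfm, 16);	/* 16-byte tag */

		/* buf layout: assoc data | plaintext | room for the tag */
		req = aead_request_alloc(tfm, GFP_KERNEL);
		sg_init_one(&sg, buf, assoclen + ptlen + 16);
		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		aead_request_set_ad(req, assoclen);
		aead_request_set_crypt(req, &sg, &sg, ptlen, iv);	/* 12-byte IV */
		err = err ?: crypto_wait_req(crypto_aead_encrypt(req), &wait);

		aead_request_free(req);
		crypto_free_aead(tfm);
		return err;
	}

The SEQIV/ECHAINIV generators above wrap exactly this kind of AEAD transform to supply IVs for IPsec, which is why both now share the common CRYPTO_GENIV helper.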
 config CRYPTO_ESSIV
-	tristate "ESSIV support for block encryption"
+	tristate "Encrypted Salt-Sector IV Generator"
 	select CRYPTO_AUTHENC
 	help
-	  Encrypted salt-sector initialization vector (ESSIV) is an IV
-	  generation method that is used in some cases by fscrypt and/or
+	  Encrypted Salt-Sector IV generator
+
+	  This IV generator is used in some cases by fscrypt and/or
 	  dm-crypt.  It uses the hash of the block encryption key as the
 	  symmetric key for a block encryption pass applied to the
 	  input IV, making low entropy IV sources more suitable for block
@@ -543,1224 +874,299 @@ config CRYPTO_ESSIV
 	  combined with ESSIV the only feasible mode for h/w accelerated
 	  block encryption)
 
-comment "Hash modes"
-
-config CRYPTO_CMAC
-	tristate "CMAC support"
-	select CRYPTO_HASH
-	select CRYPTO_MANAGER
-	help
-	  Cipher-based Message Authentication Code (CMAC) specified by
-	  The National Institute of Standards and Technology (NIST).
-
-	  https://tools.ietf.org/html/rfc4493
-	  http://csrc.nist.gov/publications/nistpubs/800-38B/SP_800-38B.pdf
-
-config CRYPTO_HMAC
-	tristate "HMAC support"
-	select CRYPTO_HASH
-	select CRYPTO_MANAGER
-	help
-	  HMAC: Keyed-Hashing for Message Authentication (RFC2104).
-	  This is required for IPSec.
-
-config CRYPTO_XCBC
-	tristate "XCBC support"
-	select CRYPTO_HASH
-	select CRYPTO_MANAGER
-	help
-	  XCBC: Keyed-Hashing with encryption algorithm
-	  https://www.ietf.org/rfc/rfc3566.txt
-	  http://csrc.nist.gov/encryption/modes/proposedmodes/
-	  xcbc-mac/xcbc-mac-spec.pdf
+endmenu
 
-config CRYPTO_VMAC
-	tristate "VMAC support"
-	select CRYPTO_HASH
-	select CRYPTO_MANAGER
-	help
-	  VMAC is a message authentication algorithm designed for
-	  very high speed on 64-bit architectures.
-
-	  See also:
-	  <https://fastcrypto.org/vmac>
-
-comment "Digest"
-
-config CRYPTO_CRC32C
-	tristate "CRC32c CRC algorithm"
-	select CRYPTO_HASH
-	select CRC32
-	help
-	  Castagnoli, et al Cyclic Redundancy-Check Algorithm.  Used
-	  by iSCSI for header and data digests and by others.
-	  See Castagnoli93.  Module will be crc32c.
-
-config CRYPTO_CRC32C_INTEL
-	tristate "CRC32c INTEL hardware acceleration"
-	depends on X86
-	select CRYPTO_HASH
-	help
-	  In Intel processor with SSE4.2 supported, the processor will
-	  support CRC32C implementation using hardware accelerated CRC32
-	  instruction. This option will create 'crc32c-intel' module,
-	  which will enable any routine to use the CRC32 instruction to
-	  gain performance compared with software implementation.
-	  Module will be crc32c-intel.
-
-config CRYPTO_CRC32C_VPMSUM
-	tristate "CRC32c CRC algorithm (powerpc64)"
-	depends on PPC64 && ALTIVEC
-	select CRYPTO_HASH
-	select CRC32
-	help
-	  CRC32c algorithm implemented using vector polynomial multiply-sum
-	  (vpmsum) instructions, introduced in POWER8. Enable on POWER8
-	  and newer processors for improved performance.
-
-config CRYPTO_CRC32C_SPARC64
-	tristate "CRC32c CRC algorithm (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_HASH
-	select CRC32
-	help
-	  CRC32c CRC algorithm implemented using sparc64 crypto instructions,
-	  when available.
-
-config CRYPTO_CRC32
-	tristate "CRC32 CRC algorithm"
-	select CRYPTO_HASH
-	select CRC32
-	help
-	  CRC-32-IEEE 802.3 cyclic redundancy-check algorithm.
-	  Shash crypto api wrappers to crc32_le function.
-
-config CRYPTO_CRC32_PCLMUL
-	tristate "CRC32 PCLMULQDQ hardware acceleration"
-	depends on X86
-	select CRYPTO_HASH
-	select CRC32
-	help
-	  From Intel Westmere and AMD Bulldozer processor with SSE4.2
-	  and PCLMULQDQ supported, the processor will support
-	  CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ
-	  instruction. This option will create 'crc32-pclmul' module,
-	  which will enable any routine to use the CRC-32-IEEE 802.3 checksum
-	  and gain better performance as compared with the table implementation.
-
-config CRYPTO_CRC32_MIPS
-	tristate "CRC32c and CRC32 CRC algorithm (MIPS)"
-	depends on MIPS_CRC_SUPPORT
-	select CRYPTO_HASH
-	help
-	  CRC32c and CRC32 CRC algorithms implemented using mips crypto
-	  instructions, when available.
-
-config CRYPTO_XXHASH
-	tristate "xxHash hash algorithm"
-	select CRYPTO_HASH
-	select XXHASH
-	help
-	  xxHash non-cryptographic hash algorithm. Extremely fast, working at
-	  speeds close to RAM limits.
+menu "Hashes, digests, and MACs"
 
 config CRYPTO_BLAKE2B
-	tristate "BLAKE2b digest algorithm"
+	tristate "BLAKE2b"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_BLAKE2B
 	help
-	  Implementation of cryptographic hash function BLAKE2b (or just BLAKE2),
-	  optimized for 64bit platforms and can produce digests of any size
-	  between 1 to 64.  The keyed hash is also implemented.
+	  BLAKE2b cryptographic hash function (RFC 7693)
 
-	  This module provides the following algorithms:
+	  BLAKE2b is optimized for 64-bit platforms and can produce digests
+	  of any size between 1 and 64 bytes.  The keyed hash is also implemented.
 
+	  This module provides the following algorithms:
 	  - blake2b-160
 	  - blake2b-256
 	  - blake2b-384
 	  - blake2b-512
 
-	  See https://blake2.net for further information.
-
-config CRYPTO_BLAKE2S
-	tristate "BLAKE2s digest algorithm"
-	select CRYPTO_LIB_BLAKE2S_GENERIC
-	select CRYPTO_HASH
-	help
-	  Implementation of cryptographic hash function BLAKE2s
-	  optimized for 8-32bit platforms and can produce digests of any size
-	  between 1 to 32.  The keyed hash is also implemented.
-
-	  This module provides the following algorithms:
-
-	  - blake2s-128
-	  - blake2s-160
-	  - blake2s-224
-	  - blake2s-256
+	  Used by the btrfs filesystem.
 
 	  See https://blake2.net for further information.
 
-config CRYPTO_BLAKE2S_X86
-	tristate "BLAKE2s digest algorithm (x86 accelerated version)"
-	depends on X86 && 64BIT
-	select CRYPTO_LIB_BLAKE2S_GENERIC
-	select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
-
-config CRYPTO_CRCT10DIF
-	tristate "CRCT10DIF algorithm"
-	select CRYPTO_HASH
-	help
-	  CRC T10 Data Integrity Field computation is being cast as
-	  a crypto transform.  This allows for faster crc t10 diff
-	  transforms to be used if they are available.
-
-config CRYPTO_CRCT10DIF_PCLMUL
-	tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
-	depends on X86 && 64BIT && CRC_T10DIF
-	select CRYPTO_HASH
-	help
-	  For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
-	  CRC T10 DIF PCLMULQDQ computation can be hardware
-	  accelerated PCLMULQDQ instruction. This option will create
-	  'crct10dif-pclmul' module, which is faster when computing the
-	  crct10dif checksum as compared with the generic table implementation.
-
-config CRYPTO_CRCT10DIF_VPMSUM
-	tristate "CRC32T10DIF powerpc64 hardware acceleration"
-	depends on PPC64 && ALTIVEC && CRC_T10DIF
+config CRYPTO_CMAC
+	tristate "CMAC (Cipher-based MAC)"
 	select CRYPTO_HASH
+	select CRYPTO_MANAGER
 	help
-	  CRC10T10DIF algorithm implemented using vector polynomial
-	  multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on
-	  POWER8 and newer processors for improved performance.
-
-config CRYPTO_VPMSUM_TESTER
-	tristate "Powerpc64 vpmsum hardware acceleration tester"
-	depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
-	help
-	  Stress test for CRC32c and CRC-T10DIF algorithms implemented with
-	  POWER8 vpmsum instructions.
-	  Unless you are testing these algorithms, you don't need this.
+	  CMAC (Cipher-based Message Authentication Code) authentication
+	  mode (NIST SP800-38B and IETF RFC4493)
 
 config CRYPTO_GHASH
-	tristate "GHASH hash function"
-	select CRYPTO_GF128MUL
+	tristate "GHASH"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_GF128MUL
 	help
-	  GHASH is the hash function used in GCM (Galois/Counter Mode).
-	  It is not a general-purpose cryptographic hash function.
+	  GCM GHASH function (NIST SP800-38D)
 
-config CRYPTO_POLY1305
-	tristate "Poly1305 authenticator algorithm"
+config CRYPTO_HMAC
+	tristate "HMAC (Keyed-Hash MAC)"
 	select CRYPTO_HASH
-	select CRYPTO_LIB_POLY1305_GENERIC
-	help
-	  Poly1305 authenticator algorithm, RFC7539.
-
-	  Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
-	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
-	  in IETF protocols. This is the portable C implementation of Poly1305.
-
-config CRYPTO_POLY1305_X86_64
-	tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
-	depends on X86 && 64BIT
-	select CRYPTO_LIB_POLY1305_GENERIC
-	select CRYPTO_ARCH_HAVE_LIB_POLY1305
+	select CRYPTO_MANAGER
 	help
-	  Poly1305 authenticator algorithm, RFC7539.
-
-	  Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
-	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
-	  in IETF protocols. This is the x86_64 assembler implementation using SIMD
-	  instructions.
+	  HMAC (Keyed-Hash Message Authentication Code) (FIPS 198 and
+	  RFC2104)
 
-config CRYPTO_POLY1305_MIPS
-	tristate "Poly1305 authenticator algorithm (MIPS optimized)"
-	depends on MIPS
-	select CRYPTO_ARCH_HAVE_LIB_POLY1305
+	  This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP).
 
 config CRYPTO_MD4
-	tristate "MD4 digest algorithm"
+	tristate "MD4"
 	select CRYPTO_HASH
 	help
-	  MD4 message digest algorithm (RFC1320).
+	  MD4 message digest algorithm (RFC1320)
 
 config CRYPTO_MD5
-	tristate "MD5 digest algorithm"
+	tristate "MD5"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_MD5
 	help
-	  MD5 message digest algorithm (RFC1321).
+	  MD5 message digest algorithm (RFC1321), including HMAC support.
 
-config CRYPTO_MD5_OCTEON
-	tristate "MD5 digest algorithm (OCTEON)"
-	depends on CPU_CAVIUM_OCTEON
-	select CRYPTO_MD5
-	select CRYPTO_HASH
-	help
-	  MD5 message digest algorithm (RFC1321) implemented
-	  using OCTEON crypto instructions, when available.
-
-config CRYPTO_MD5_PPC
-	tristate "MD5 digest algorithm (PPC)"
-	depends on PPC
+config CRYPTO_MICHAEL_MIC
+	tristate "Michael MIC"
 	select CRYPTO_HASH
 	help
-	  MD5 message digest algorithm (RFC1321) implemented
-	  in PPC assembler.
+	  Michael MIC (Message Integrity Code) (IEEE 802.11i)
 
-config CRYPTO_MD5_SPARC64
-	tristate "MD5 digest algorithm (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_MD5
-	select CRYPTO_HASH
-	help
-	  MD5 message digest algorithm (RFC1321) implemented
-	  using sparc64 crypto instructions, when available.
+	  Defined by the IEEE 802.11i TKIP (Temporal Key Integrity Protocol),
+	  known as WPA (Wi-Fi Protected Access).
 
-config CRYPTO_MICHAEL_MIC
-	tristate "Michael MIC keyed digest algorithm"
-	select CRYPTO_HASH
-	help
-	  Michael MIC is used for message integrity protection in TKIP
-	  (IEEE 802.11i).  This algorithm is required for TKIP, but it
-	  should not be used for other purposes because of the weakness
-	  of the algorithm.
+	  This algorithm is required for TKIP, but it should not be used for
+	  other purposes because of the weakness of the algorithm.
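As a usage note for the MAC entries above: keyed hashes such as "hmac(sha256)" are reached through the shash API. A minimal sketch, with error handling elided:

	#include <crypto/hash.h>

	static int hmac_sha256_demo(const u8 *key, unsigned int keylen,
				    const void *data, unsigned int len,
				    u8 out[32])
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);	/* hmac template over sha256 */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, key, keylen);
		/* One-shot digest over a linear buffer. */
		err = err ?: crypto_shash_tfm_digest(tfm, data, len, out);

		crypto_free_shash(tfm);
		return err;
	}

Unkeyed digests ("sha256", "blake2b-256", and so on) use the same API minus the setkey step.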
 config CRYPTO_RMD160
-	tristate "RIPEMD-160 digest algorithm"
+	tristate "RIPEMD-160"
 	select CRYPTO_HASH
 	help
-	  RIPEMD-160 (ISO/IEC 10118-3:2004).
+	  RIPEMD-160 hash function (ISO/IEC 10118-3)
 
 	  RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
 	  to be used as a secure replacement for the 128-bit hash functions
-	  MD4, MD5 and it's predecessor RIPEMD
+	  MD4, MD5 and its predecessor RIPEMD
 	  (not to be confused with RIPEMD-128).
 
-	  It's speed is comparable to SHA1 and there are no known attacks
+	  Its speed is comparable to SHA-1 and there are no known attacks
 	  against RIPEMD-160.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See https://homes.esat.kuleuven.be/~bosselae/ripemd160.html
+	  for further information.
 
 config CRYPTO_SHA1
-	tristate "SHA1 digest algorithm"
+	tristate "SHA-1"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_SHA1
 	help
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-config CRYPTO_SHA1_SSSE3
-	tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)"
-	depends on X86 && 64BIT
-	select CRYPTO_SHA1
-	select CRYPTO_HASH
-	help
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
-	  Extensions (AVX/AVX2) or SHA-NI(SHA Extensions New Instructions),
-	  when available.
-
-config CRYPTO_SHA256_SSSE3
-	tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)"
-	depends on X86 && 64BIT
-	select CRYPTO_SHA256
-	select CRYPTO_HASH
-	help
-	  SHA-256 secure hash standard (DFIPS 180-2) implemented
-	  using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector
-	  Extensions version 1 (AVX1), or Advanced Vector Extensions
-	  version 2 (AVX2) instructions, or SHA-NI (SHA Extensions New
-	  Instructions) when available.
-
-config CRYPTO_SHA512_SSSE3
-	tristate "SHA512 digest algorithm (SSSE3/AVX/AVX2)"
-	depends on X86 && 64BIT
-	select CRYPTO_SHA512
-	select CRYPTO_HASH
-	help
-	  SHA-512 secure hash standard (DFIPS 180-2) implemented
-	  using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector
-	  Extensions version 1 (AVX1), or Advanced Vector Extensions
-	  version 2 (AVX2) instructions, when available.
-
-config CRYPTO_SHA1_OCTEON
-	tristate "SHA1 digest algorithm (OCTEON)"
-	depends on CPU_CAVIUM_OCTEON
-	select CRYPTO_SHA1
-	select CRYPTO_HASH
-	help
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using OCTEON crypto instructions, when available.
-
-config CRYPTO_SHA1_SPARC64
-	tristate "SHA1 digest algorithm (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_SHA1
-	select CRYPTO_HASH
-	help
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using sparc64 crypto instructions, when available.
-
-config CRYPTO_SHA1_PPC
-	tristate "SHA1 digest algorithm (powerpc)"
-	depends on PPC
-	help
-	  This is the powerpc hardware accelerated implementation of the
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-config CRYPTO_SHA1_PPC_SPE
-	tristate "SHA1 digest algorithm (PPC SPE)"
-	depends on PPC && SPE
-	help
-	  SHA-1 secure hash standard (DFIPS 180-4) implemented
-	  using powerpc SPE SIMD instruction set.
+	  SHA-1 secure hash algorithm (FIPS 180, ISO/IEC 10118-3), including
+	  HMAC support.
 
 config CRYPTO_SHA256
-	tristate "SHA224 and SHA256 digest algorithm"
+	tristate "SHA-224 and SHA-256"
 	select CRYPTO_HASH
 	select CRYPTO_LIB_SHA256
 	help
-	  SHA256 secure hash standard (DFIPS 180-2).
-
-	  This version of SHA implements a 256 bit hash with 128 bits of
-	  security against collision attacks.
+	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC
+	  10118-3), including HMAC support.
 
-	  This code also includes SHA-224, a 224 bit hash with 112 bits
-	  of security against collision attacks.
-
-config CRYPTO_SHA256_PPC_SPE
-	tristate "SHA224 and SHA256 digest algorithm (PPC SPE)"
-	depends on PPC && SPE
-	select CRYPTO_SHA256
-	select CRYPTO_HASH
-	help
-	  SHA224 and SHA256 secure hash standard (DFIPS 180-2)
-	  implemented using powerpc SPE SIMD instruction set.
-
-config CRYPTO_SHA256_OCTEON
-	tristate "SHA224 and SHA256 digest algorithm (OCTEON)"
-	depends on CPU_CAVIUM_OCTEON
-	select CRYPTO_SHA256
-	select CRYPTO_HASH
-	help
-	  SHA-256 secure hash standard (DFIPS 180-2) implemented
-	  using OCTEON crypto instructions, when available.
-
-config CRYPTO_SHA256_SPARC64
-	tristate "SHA224 and SHA256 digest algorithm (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_SHA256
-	select CRYPTO_HASH
-	help
-	  SHA-256 secure hash standard (DFIPS 180-2) implemented
-	  using sparc64 crypto instructions, when available.
+	  This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP).
+	  Used by the btrfs filesystem, Ceph, NFS, and SMB.
 
 config CRYPTO_SHA512
-	tristate "SHA384 and SHA512 digest algorithms"
-	select CRYPTO_HASH
-	help
-	  SHA512 secure hash standard (DFIPS 180-2).
-
-	  This version of SHA implements a 512 bit hash with 256 bits of
-	  security against collision attacks.
-
-	  This code also includes SHA-384, a 384 bit hash with 192 bits
-	  of security against collision attacks.
-
-config CRYPTO_SHA512_OCTEON
-	tristate "SHA384 and SHA512 digest algorithms (OCTEON)"
-	depends on CPU_CAVIUM_OCTEON
-	select CRYPTO_SHA512
-	select CRYPTO_HASH
-	help
-	  SHA-512 secure hash standard (DFIPS 180-2) implemented
-	  using OCTEON crypto instructions, when available.
-
-config CRYPTO_SHA512_SPARC64
-	tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_SHA512
+	tristate "SHA-384 and SHA-512"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_SHA512
 	help
-	  SHA-512 secure hash standard (DFIPS 180-2) implemented
-	  using sparc64 crypto instructions, when available.
+	  SHA-384 and SHA-512 secure hash algorithms (FIPS 180, ISO/IEC
+	  10118-3), including HMAC support.
 
 config CRYPTO_SHA3
-	tristate "SHA3 digest algorithm"
+	tristate "SHA-3"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_SHA3
 	help
-	  SHA-3 secure hash standard (DFIPS 202). It's based on
-	  cryptographic sponge function family called Keccak.
-
-	  References:
-	  http://keccak.noekeon.org/
+	  SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
 
-config CRYPTO_SM3
-	tristate "SM3 digest algorithm"
+config CRYPTO_SM3_GENERIC
+	tristate "SM3 (ShangMi 3)"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_SM3
 	help
-	  SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
-	  It is part of the Chinese Commercial Cryptography suite.
+	  SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3)
+
+	  This is part of the Chinese Commercial Cryptography suite.
 
 	  References:
 	  http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
 	  https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
 
 config CRYPTO_STREEBOG
-	tristate "Streebog Hash Function"
+	tristate "Streebog"
 	select CRYPTO_HASH
 	help
-	  Streebog Hash Function (GOST R 34.11-2012, RFC 6986) is one of the Russian
-	  cryptographic standard algorithms (called GOST algorithms).
-	  This setting enables two hash algorithms with 256 and 512 bits output.
+	  Streebog Hash Function (GOST R 34.11-2012, RFC 6986, ISO/IEC 10118-3)
+
+	  This is one of the Russian cryptographic standard algorithms (called
+	  GOST algorithms).  This setting enables two hash algorithms with
+	  256 and 512 bits output.
 
 	  References:
 	  https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
 	  https://tools.ietf.org/html/rfc6986
 
 config CRYPTO_WP512
-	tristate "Whirlpool digest algorithms"
+	tristate "Whirlpool"
 	select CRYPTO_HASH
 	help
-	  Whirlpool hash algorithm 512, 384 and 256-bit hashes
-
-	  Whirlpool-512 is part of the NESSIE cryptographic primitives.
-	  Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
-
-	  See also:
-	  <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
-
-config CRYPTO_GHASH_CLMUL_NI_INTEL
-	tristate "GHASH hash function (CLMUL-NI accelerated)"
-	depends on X86 && 64BIT
-	select CRYPTO_CRYPTD
-	help
-	  This is the x86_64 CLMUL-NI accelerated implementation of
-	  GHASH, the hash function used in GCM (Galois/Counter mode).
-
-comment "Ciphers"
-
-config CRYPTO_AES
-	tristate "AES cipher algorithms"
-	select CRYPTO_ALGAPI
-	select CRYPTO_LIB_AES
-	help
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
+	  Whirlpool hash function (ISO/IEC 10118-3)
 
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
+	  512, 384 and 256-bit hashes.
 
-	  The AES specifies three key sizes: 128, 192 and 256 bits
-
-	  See <http://csrc.nist.gov/CryptoToolkit/aes/> for more information.
-
-config CRYPTO_AES_TI
-	tristate "Fixed time AES cipher"
-	select CRYPTO_ALGAPI
-	select CRYPTO_LIB_AES
-	help
-	  This is a generic implementation of AES that attempts to eliminate
-	  data dependent latencies as much as possible without affecting
-	  performance too much. It is intended for use by the generic CCM
-	  and GCM drivers, and other CTR or CMAC/XCBC based modes that rely
-	  solely on encryption (although decryption is supported as well, but
-	  with a more dramatic performance hit)
-
-	  Instead of using 16 lookup tables of 1 KB each, (8 for encryption and
-	  8 for decryption), this implementation only uses just two S-boxes of
-	  256 bytes each, and attempts to eliminate data dependent latencies by
-	  prefetching the entire table into the cache at the start of each
-	  block. Interrupts are also disabled to avoid races where cachelines
-	  are evicted when the CPU is interrupted to do something else.
-
-config CRYPTO_AES_NI_INTEL
-	tristate "AES cipher algorithms (AES-NI)"
-	depends on X86
-	select CRYPTO_AEAD
-	select CRYPTO_LIB_AES
-	select CRYPTO_ALGAPI
-	select CRYPTO_SKCIPHER
-	select CRYPTO_SIMD
-	help
-	  Use Intel AES-NI instructions for AES algorithm.
-
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
-
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good.
-	  Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
-
-	  The AES specifies three key sizes: 128, 192 and 256 bits
-
-	  See <http://csrc.nist.gov/encryption/aes/> for more information.
-
-	  In addition to AES cipher algorithm support, the acceleration
-	  for some popular block cipher mode is supported too, including
-	  ECB, CBC, LRW, XTS. The 64 bit version has additional
-	  acceleration for CTR.
-
-config CRYPTO_AES_SPARC64
-	tristate "AES cipher algorithms (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_SKCIPHER
-	help
-	  Use SPARC64 crypto opcodes for AES algorithm.
-
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
-
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
-
-	  The AES specifies three key sizes: 128, 192 and 256 bits
-
-	  See <http://csrc.nist.gov/encryption/aes/> for more information.
-
-	  In addition to AES cipher algorithm support, the acceleration
-	  for some popular block cipher mode is supported too, including
-	  ECB and CBC.
-
-config CRYPTO_AES_PPC_SPE
-	tristate "AES cipher algorithms (PPC SPE)"
-	depends on PPC && SPE
-	select CRYPTO_SKCIPHER
-	help
-	  AES cipher algorithms (FIPS-197). Additionally the acceleration
-	  for popular block cipher modes ECB, CBC, CTR and XTS is supported.
-	  This module should only be used for low power (router) devices
-	  without hardware AES acceleration (e.g. caam crypto). It reduces the
-	  size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
-	  timining attacks. Nevertheless it might be not as secure as other
-	  architecture specific assembler implementations that work on 1KB
-	  tables or 256 bytes S-boxes.
-
-config CRYPTO_ANUBIS
-	tristate "Anubis cipher algorithm"
-	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
-	select CRYPTO_ALGAPI
-	help
-	  Anubis cipher algorithm.
-
-	  Anubis is a variable key length cipher which can use keys from
-	  128 bits to 320 bits in length.  It was evaluated as a entrant
-	  in the NESSIE competition.
-
-	  See also:
-	  <https://www.cosic.esat.kuleuven.be/nessie/reports/>
-	  <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
-
-config CRYPTO_ARC4
-	tristate "ARC4 cipher algorithm"
-	depends on CRYPTO_USER_API_ENABLE_OBSOLETE
-	select CRYPTO_SKCIPHER
-	select CRYPTO_LIB_ARC4
-	help
-	  ARC4 cipher algorithm.
-
-	  ARC4 is a stream cipher using keys ranging from 8 bits to 2048
-	  bits in length.  This algorithm is required for driver-based
-	  WEP, but it should not be for other purposes because of the
-	  weakness of the algorithm.
-
-config CRYPTO_BLOWFISH
-	tristate "Blowfish cipher algorithm"
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLOWFISH_COMMON
-	help
-	  Blowfish cipher algorithm, by Bruce Schneier.
-
-	  This is a variable key length cipher which can use keys from 32
-	  bits to 448 bits in length.  It's fast, simple and specifically
-	  designed for use on "large microprocessors".
- - See also: - <https://www.schneier.com/blowfish.html> - -config CRYPTO_BLOWFISH_COMMON - tristate - help - Common parts of the Blowfish cipher algorithm shared by the - generic c and the assembler implementations. - - See also: - <https://www.schneier.com/blowfish.html> - -config CRYPTO_BLOWFISH_X86_64 - tristate "Blowfish cipher algorithm (x86_64)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_BLOWFISH_COMMON - imply CRYPTO_CTR - help - Blowfish cipher algorithm (x86_64), by Bruce Schneier. - - This is a variable key length cipher which can use keys from 32 - bits to 448 bits in length. It's fast, simple and specifically - designed for use on "large microprocessors". - - See also: - <https://www.schneier.com/blowfish.html> - -config CRYPTO_CAMELLIA - tristate "Camellia cipher algorithms" - select CRYPTO_ALGAPI - help - Camellia cipher algorithms module. - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_X86_64 - tristate "Camellia cipher algorithm (x86_64)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - imply CRYPTO_CTR - help - Camellia cipher algorithm module (x86_64). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 - tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_CAMELLIA_X86_64 - select CRYPTO_SIMD - imply CRYPTO_XTS - help - Camellia cipher algorithm module (x86_64/AES-NI/AVX). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 - tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" - depends on X86 && 64BIT - select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 - help - Camellia cipher algorithm module (x86_64/AES-NI/AVX2). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAMELLIA_SPARC64 - tristate "Camellia cipher algorithm (SPARC64)" - depends on SPARC64 - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - help - Camellia cipher algorithm module (SPARC64). - - Camellia is a symmetric key block cipher developed jointly - at NTT and Mitsubishi Electric Corporation. - - The Camellia specifies three key sizes: 128, 192 and 256 bits. - - See also: - <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html> - -config CRYPTO_CAST_COMMON - tristate - help - Common parts of the CAST cipher algorithms shared by the - generic c and the assembler implementations. - -config CRYPTO_CAST5 - tristate "CAST5 (CAST-128) cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_CAST_COMMON - help - The CAST5 encryption algorithm (synonymous with CAST-128) is - described in RFC2144. 
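Every cipher entry in this stretch of the Kconfig diff, whether generic C or an arch-accelerated variant, is consumed through the same in-kernel skcipher interface once its algorithm name is resolved. A minimal sketch of that calling convention follows; the "cbc(aes)" name, the 16-byte key, and the single-block buffer are illustrative choices, not anything these entries mandate:

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Encrypt one 16-byte block in place with AES-128-CBC. */
    static int example_encrypt_block(u8 *buf, const u8 *key, u8 *iv)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, 16);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            sg_init_one(&sg, buf, 16);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }

The core resolves the requested name to whichever registered implementation has the highest cra_priority, which is why the accelerated entries can be enabled independently of the generic ones they shadow.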
- -config CRYPTO_CAST5_AVX_X86_64 - tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_CAST5 - select CRYPTO_CAST_COMMON - select CRYPTO_SIMD - imply CRYPTO_CTR - help - The CAST5 encryption algorithm (synonymous with CAST-128) is - described in RFC2144. - - This module provides the Cast5 cipher algorithm that processes - sixteen blocks parallel using the AVX instruction set. - -config CRYPTO_CAST6 - tristate "CAST6 (CAST-256) cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_CAST_COMMON - help - The CAST6 encryption algorithm (synonymous with CAST-256) is - described in RFC2612. - -config CRYPTO_CAST6_AVX_X86_64 - tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_CAST6 - select CRYPTO_CAST_COMMON - select CRYPTO_SIMD - imply CRYPTO_XTS - imply CRYPTO_CTR - help - The CAST6 encryption algorithm (synonymous with CAST-256) is - described in RFC2612. - - This module provides the Cast6 cipher algorithm that processes - eight blocks parallel using the AVX instruction set. - -config CRYPTO_DES - tristate "DES and Triple DES EDE cipher algorithms" - select CRYPTO_ALGAPI - select CRYPTO_LIB_DES - help - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). - -config CRYPTO_DES_SPARC64 - tristate "DES and Triple DES EDE cipher algorithms (SPARC64)" - depends on SPARC64 - select CRYPTO_ALGAPI - select CRYPTO_LIB_DES - select CRYPTO_SKCIPHER - help - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), - optimized using SPARC64 crypto opcodes. - -config CRYPTO_DES3_EDE_X86_64 - tristate "Triple DES EDE cipher algorithm (x86-64)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - imply CRYPTO_CTR - help - Triple DES EDE (FIPS 46-3) algorithm. - - This module provides implementation of the Triple DES EDE cipher - algorithm that is optimized for x86-64 processors. Two versions of - algorithm are provided; regular processing one input block and - one that processes three blocks parallel. - -config CRYPTO_FCRYPT - tristate "FCrypt cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - help - FCrypt algorithm used by RxRPC. - -config CRYPTO_KHAZAD - tristate "Khazad cipher algorithm" - depends on CRYPTO_USER_API_ENABLE_OBSOLETE - select CRYPTO_ALGAPI - help - Khazad cipher algorithm. - - Khazad was a finalist in the initial NESSIE competition. It is - an algorithm optimized for 64-bit processors with good performance - on 32-bit processors. Khazad uses an 128 bit key size. - - See also: - <http://www.larc.usp.br/~pbarreto/KhazadPage.html> - -config CRYPTO_CHACHA20 - tristate "ChaCha stream cipher algorithms" - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_SKCIPHER - help - The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms. - - ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. - Bernstein and further specified in RFC7539 for use in IETF protocols. - This is the portable C implementation of ChaCha20. See also: - <https://cr.yp.to/chacha/chacha-20080128.pdf> - - XChaCha20 is the application of the XSalsa20 construction to ChaCha20 - rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length - from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits, - while provably retaining ChaCha20's security. 
See also: - <https://cr.yp.to/snuffle/xsalsa-20081128.pdf> - - XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly - reduced security margin but increased performance. It can be needed - in some performance-sensitive scenarios. - -config CRYPTO_CHACHA20_X86_64 - tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - help - SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20, - XChaCha20, and XChaCha12 stream ciphers. - -config CRYPTO_CHACHA_MIPS - tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)" - depends on CPU_MIPS32_R2 - select CRYPTO_SKCIPHER - select CRYPTO_ARCH_HAVE_LIB_CHACHA - -config CRYPTO_SEED - tristate "SEED cipher algorithm" - depends on CRYPTO_USER_API_ENABLE_OBSOLETE - select CRYPTO_ALGAPI - help - SEED cipher algorithm (RFC4269). - - SEED is a 128-bit symmetric key block cipher that has been - developed by KISA (Korea Information Security Agency) as a - national standard encryption algorithm of the Republic of Korea. - It is a 16 round block cipher with the key size of 128 bit. - - See also: - <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp> - -config CRYPTO_SERPENT - tristate "Serpent cipher algorithm" - select CRYPTO_ALGAPI - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - See also: - <https://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_SSE2_X86_64 - tristate "Serpent cipher algorithm (x86_64/SSE2)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_SERPENT - select CRYPTO_SIMD - imply CRYPTO_CTR - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides Serpent cipher algorithm that processes eight - blocks parallel using SSE2 instruction set. - - See also: - <https://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_SSE2_586 - tristate "Serpent cipher algorithm (i586/SSE2)" - depends on X86 && !64BIT - select CRYPTO_SKCIPHER - select CRYPTO_SERPENT - select CRYPTO_SIMD - imply CRYPTO_CTR - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides Serpent cipher algorithm that processes four - blocks parallel using SSE2 instruction set. - - See also: - <https://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_AVX_X86_64 - tristate "Serpent cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_SERPENT - select CRYPTO_SIMD - imply CRYPTO_XTS - imply CRYPTO_CTR - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. - - This module provides the Serpent cipher algorithm that processes - eight blocks parallel using the AVX instruction set. - - See also: - <https://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SERPENT_AVX2_X86_64 - tristate "Serpent cipher algorithm (x86_64/AVX2)" - depends on X86 && 64BIT - select CRYPTO_SERPENT_AVX_X86_64 - help - Serpent cipher algorithm, by Anderson, Biham & Knudsen. - - Keys are allowed to be from 0 to 256 bits in length, in steps - of 8 bits. 
- - This module provides Serpent cipher algorithm that processes 16 - blocks parallel using AVX2 instruction set. - - See also: - <https://www.cl.cam.ac.uk/~rja14/serpent.html> - -config CRYPTO_SM4 - tristate "SM4 cipher algorithm" - select CRYPTO_ALGAPI - help - SM4 cipher algorithms (OSCCA GB/T 32907-2016). - - SM4 (GBT.32907-2016) is a cryptographic standard issued by the - Organization of State Commercial Administration of China (OSCCA) - as an authorized cryptographic algorithms for the use within China. - - SMS4 was originally created for use in protecting wireless - networks, and is mandated in the Chinese National Standard for - Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure) - (GB.15629.11-2003). - - The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and - standardized through TC 260 of the Standardization Administration - of the People's Republic of China (SAC). - - The input, output, and key of SMS4 are each 128 bits. - - See also: <https://eprint.iacr.org/2008/329.pdf> - - If unsure, say N. - -config CRYPTO_TEA - tristate "TEA, XTEA and XETA cipher algorithms" - depends on CRYPTO_USER_API_ENABLE_OBSOLETE - select CRYPTO_ALGAPI - help - TEA cipher algorithm. - - Tiny Encryption Algorithm is a simple cipher that uses - many rounds for security. It is very fast and uses - little memory. - - Xtendend Tiny Encryption Algorithm is a modification to - the TEA algorithm to address a potential key weakness - in the TEA algorithm. + Whirlpool-512 is part of the NESSIE cryptographic primitives. - Xtendend Encryption Tiny Algorithm is a mis-implementation - of the XTEA algorithm for compatibility purposes. + See https://web.archive.org/web/20171129084214/http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html + for further information. -config CRYPTO_TWOFISH - tristate "Twofish cipher algorithm" - select CRYPTO_ALGAPI - select CRYPTO_TWOFISH_COMMON +config CRYPTO_XCBC + tristate "XCBC-MAC (Extended Cipher Block Chaining MAC)" + select CRYPTO_HASH + select CRYPTO_MANAGER help - Twofish cipher algorithm. + XCBC-MAC (Extended Cipher Block Chaining Message Authentication + Code) (RFC3566) - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. - - See also: - <https://www.schneier.com/twofish.html> - -config CRYPTO_TWOFISH_COMMON - tristate +config CRYPTO_XXHASH + tristate "xxHash" + select CRYPTO_HASH + select XXHASH help - Common parts of the Twofish cipher algorithm shared by the - generic c and the assembler implementations. + xxHash non-cryptographic hash algorithm -config CRYPTO_TWOFISH_586 - tristate "Twofish cipher algorithms (i586)" - depends on (X86 || UML_X86) && !64BIT - select CRYPTO_ALGAPI - select CRYPTO_TWOFISH_COMMON - imply CRYPTO_CTR - help - Twofish cipher algorithm. + Extremely fast, working at speeds close to RAM limits. - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. + Used by the btrfs filesystem. - See also: - <https://www.schneier.com/twofish.html> +endmenu -config CRYPTO_TWOFISH_X86_64 - tristate "Twofish cipher algorithm (x86_64)" - depends on (X86 || UML_X86) && 64BIT - select CRYPTO_ALGAPI - select CRYPTO_TWOFISH_COMMON - imply CRYPTO_CTR - help - Twofish cipher algorithm (x86_64). 
+menu "CRCs (cyclic redundancy checks)" - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. - - See also: - <https://www.schneier.com/twofish.html> - -config CRYPTO_TWOFISH_X86_64_3WAY - tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_TWOFISH_COMMON - select CRYPTO_TWOFISH_X86_64 +config CRYPTO_CRC32C + tristate "CRC32c" + select CRYPTO_HASH + select CRC32 help - Twofish cipher algorithm (x86_64, 3-way parallel). + CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720) - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. + A 32-bit CRC (cyclic redundancy check) with a polynomial defined + by G. Castagnoli, S. Braeuer and M. Herrman in "Optimization of Cyclic + Redundancy-Check Codes with 24 and 32 Parity Bits", IEEE Transactions + on Communications, Vol. 41, No. 6, June 1993, selected for use with + iSCSI. - This module provides Twofish cipher algorithm that processes three - blocks parallel, utilizing resources of out-of-order CPUs better. + Used by btrfs, ext4, jbd2, NVMeoF/TCP, and iSCSI. - See also: - <https://www.schneier.com/twofish.html> - -config CRYPTO_TWOFISH_AVX_X86_64 - tristate "Twofish cipher algorithm (x86_64/AVX)" - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_SIMD - select CRYPTO_TWOFISH_COMMON - select CRYPTO_TWOFISH_X86_64 - select CRYPTO_TWOFISH_X86_64_3WAY - imply CRYPTO_XTS +config CRYPTO_CRC32 + tristate "CRC32" + select CRYPTO_HASH + select CRC32 help - Twofish cipher algorithm (x86_64/AVX). - - Twofish was submitted as an AES (Advanced Encryption Standard) - candidate cipher by researchers at CounterPane Systems. It is a - 16 round block cipher supporting key sizes of 128, 192, and 256 - bits. + CRC32 CRC algorithm (IEEE 802.3) - This module provides the Twofish cipher algorithm that processes - eight blocks parallel using the AVX Instruction Set. + Used by RoCEv2 and f2fs. - See also: - <https://www.schneier.com/twofish.html> +endmenu -comment "Compression" +menu "Compression" config CRYPTO_DEFLATE - tristate "Deflate compression algorithm" + tristate "Deflate" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select ZLIB_INFLATE select ZLIB_DEFLATE help - This is the Deflate algorithm (RFC1951), specified for use in - IPSec with the IPCOMP protocol (RFC3173, RFC2394). + Deflate compression algorithm (RFC1951) - You will most probably want this if using IPSec. + Used by IPSec with the IPCOMP protocol (RFC3173, RFC2394) config CRYPTO_LZO - tristate "LZO compression algorithm" + tristate "LZO" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select LZO_COMPRESS select LZO_DECOMPRESS help - This is the LZO algorithm. + LZO compression algorithm + + See https://www.oberhumer.com/opensource/lzo/ for further information. config CRYPTO_842 - tristate "842 compression algorithm" + tristate "842" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select 842_COMPRESS select 842_DECOMPRESS help - This is the 842 algorithm. + 842 compression algorithm by IBM + + See https://github.com/plauth/lib842 for further information. 
config CRYPTO_LZ4 - tristate "LZ4 compression algorithm" + tristate "LZ4" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select LZ4_COMPRESS select LZ4_DECOMPRESS help - This is the LZ4 algorithm. + LZ4 compression algorithm + + See https://github.com/lz4/lz4 for further information. config CRYPTO_LZ4HC - tristate "LZ4HC compression algorithm" + tristate "LZ4HC" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select LZ4HC_COMPRESS select LZ4_DECOMPRESS help - This is the LZ4 high compression mode algorithm. + LZ4 high compression mode algorithm + + See https://github.com/lz4/lz4 for further information. config CRYPTO_ZSTD - tristate "Zstd compression algorithm" + tristate "Zstd" select CRYPTO_ALGAPI select CRYPTO_ACOMP2 select ZSTD_COMPRESS select ZSTD_DECOMPRESS help - This is the zstd algorithm. + zstd compression algorithm -comment "Random Number Generation" + See https://github.com/facebook/zstd for further information. -config CRYPTO_ANSI_CPRNG - tristate "Pseudo Random Number Generation for Cryptographic modules" - select CRYPTO_AES - select CRYPTO_RNG - help - This option enables the generic pseudo random number generator - for cryptographic modules. Uses the Algorithm specified in - ANSI X9.31 A.2.4. Note that this option must be enabled if - CRYPTO_FIPS is selected +endmenu + +menu "Random number generation" menuconfig CRYPTO_DRBG_MENU - tristate "NIST SP800-90A DRBG" + tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)" help - NIST SP800-90A compliant DRBG. In the following submenu, one or - more of the DRBG types must be selected. + DRBG (Deterministic Random Bit Generator) (NIST SP800-90A) + + In the following submenu, one or more of the DRBG types must be selected. if CRYPTO_DRBG_MENU @@ -1768,20 +1174,23 @@ config CRYPTO_DRBG_HMAC bool default y select CRYPTO_HMAC - select CRYPTO_SHA256 + select CRYPTO_SHA512 config CRYPTO_DRBG_HASH - bool "Enable Hash DRBG" + bool "Hash_DRBG" select CRYPTO_SHA256 help - Enable the Hash DRBG variant as defined in NIST SP800-90A. + Hash_DRBG variant as defined in NIST SP800-90A. + + This uses the SHA-1, SHA-256, SHA-384, or SHA-512 hash algorithms. config CRYPTO_DRBG_CTR - bool "Enable CTR DRBG" - select CRYPTO_AES - select CRYPTO_CTR + bool "CTR_DRBG" + select CRYPTO_DF80090A help - Enable the CTR DRBG variant as defined in NIST SP800-90A. + CTR_DRBG variant as defined in NIST SP800-90A. + + This uses the AES cipher algorithm with the counter block mode. config CRYPTO_DRBG tristate @@ -1792,67 +1201,198 @@ config CRYPTO_DRBG endif # if CRYPTO_DRBG_MENU config CRYPTO_JITTERENTROPY - tristate "Jitterentropy Non-Deterministic Random Number Generator" + tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)" select CRYPTO_RNG - help - The Jitterentropy RNG is a noise that is intended - to provide seed to another RNG. The RNG does not - perform any cryptographic whitening of the generated - random numbers. This Jitterentropy RNG registers with - the kernel crypto API and can be used by any caller. + select CRYPTO_SHA3 + help + CPU Jitter RNG (Random Number Generator) from the Jitterentropy library + + A non-physical non-deterministic ("true") RNG (e.g., an entropy source + compliant with NIST SP800-90B) intended to provide a seed to a + deterministic RNG (e.g., per NIST SP800-90C). + This RNG does not perform any cryptographic whitening of the generated + random numbers. 
+ + See https://www.chronox.de/jent/ + +if CRYPTO_JITTERENTROPY +if CRYPTO_FIPS && EXPERT + +choice + prompt "CPU Jitter RNG Memory Size" + default CRYPTO_JITTERENTROPY_MEMSIZE_2 + help + The Jitter RNG measures the execution time of memory accesses. + Multiple consecutive memory accesses are performed. If the memory + size fits into a cache (e.g. L1), only the memory access timing + to that cache is measured. The closer the cache is to the CPU + the less variations are measured and thus the less entropy is + obtained. Thus, if the memory size fits into the L1 cache, the + obtained entropy is less than if the memory size fits within + L1 + L2, which in turn is less if the memory fits into + L1 + L2 + L3. Thus, by selecting a different memory size, + the entropy rate produced by the Jitter RNG can be modified. + + config CRYPTO_JITTERENTROPY_MEMSIZE_2 + bool "2048 Bytes (default)" + + config CRYPTO_JITTERENTROPY_MEMSIZE_128 + bool "128 kBytes" + + config CRYPTO_JITTERENTROPY_MEMSIZE_1024 + bool "1024 kBytes" + + config CRYPTO_JITTERENTROPY_MEMSIZE_8192 + bool "8192 kBytes" +endchoice + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS + int + default 64 if CRYPTO_JITTERENTROPY_MEMSIZE_2 + default 512 if CRYPTO_JITTERENTROPY_MEMSIZE_128 + default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 + default 4096 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE + int + default 32 if CRYPTO_JITTERENTROPY_MEMSIZE_2 + default 256 if CRYPTO_JITTERENTROPY_MEMSIZE_128 + default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 + default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 + +config CRYPTO_JITTERENTROPY_OSR + int "CPU Jitter RNG Oversampling Rate" + range 1 15 + default 3 + help + The Jitter RNG allows the specification of an oversampling rate (OSR). + The Jitter RNG operation requires a fixed amount of timing + measurements to produce one output block of random numbers. The + OSR value is multiplied with the amount of timing measurements to + generate one output block. Thus, the timing measurement is oversampled + by the OSR factor. The oversampling allows the Jitter RNG to operate + on hardware whose timers deliver limited amount of entropy (e.g. + the timer is coarse) by setting the OSR to a higher value. The + trade-off, however, is that the Jitter RNG now requires more time + to generate random numbers. + +config CRYPTO_JITTERENTROPY_TESTINTERFACE + bool "CPU Jitter RNG Test Interface" + help + The test interface allows a privileged process to capture + the raw unconditioned high resolution time stamp noise that + is collected by the Jitter RNG for statistical analysis. As + this data is used at the same time to generate random bits, + the Jitter RNG operates in an insecure mode as long as the + recording is enabled. This interface therefore is only + intended for testing purposes and is not suitable for + production systems. + + The raw noise data can be obtained using the jent_raw_hires + debugfs file. Using the option + jitterentropy_testing.boot_raw_hires_test=1 the raw noise of + the first 1000 entropy events since boot can be sampled. + + If unsure, select N. 
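One sanity check worth stating: the four memory-size choices above are exactly the products of the MEMORY_BLOCKS and MEMORY_BLOCKSIZE defaults that accompany them. Restated as compile-time assertions (illustrative, not kernel source):

    #include <linux/build_bug.h>

    /* MEMSIZE = MEMORY_BLOCKS * MEMORY_BLOCKSIZE for each choice. */
    static_assert(  64 *   32 ==        2048);  /* 2048 bytes   */
    static_assert( 512 *  256 ==  128 * 1024);  /* 128 kBytes   */
    static_assert(1024 * 1024 == 1024 * 1024);  /* 1024 kBytes  */
    static_assert(4096 * 2048 == 8192 * 1024);  /* 8192 kBytes  */

The oversampling rate interacts multiplicatively with this: producing one output block costs a fixed number of timing measurements times the configured OSR, so raising either knob trades generation speed for entropy margin.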
+ +endif # if CRYPTO_FIPS && EXPERT + +if !(CRYPTO_FIPS && EXPERT) + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS + int + default 64 + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE + int + default 32 + +config CRYPTO_JITTERENTROPY_OSR + int + default 1 + +config CRYPTO_JITTERENTROPY_TESTINTERFACE + bool + +endif # if !(CRYPTO_FIPS && EXPERT) +endif # if CRYPTO_JITTERENTROPY + +config CRYPTO_KDF800108_CTR + tristate + select CRYPTO_HMAC + select CRYPTO_SHA256 + +config CRYPTO_DF80090A + tristate + select CRYPTO_AES + select CRYPTO_CTR + +endmenu +menu "Userspace interface" config CRYPTO_USER_API tristate config CRYPTO_USER_API_HASH - tristate "User-space interface for hash algorithms" + tristate "Hash algorithms" depends on NET select CRYPTO_HASH select CRYPTO_USER_API help - This option enables the user-spaces interface for hash - algorithms. + Enable the userspace interface for hash algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html config CRYPTO_USER_API_SKCIPHER - tristate "User-space interface for symmetric key cipher algorithms" + tristate "Symmetric key cipher algorithms" depends on NET select CRYPTO_SKCIPHER select CRYPTO_USER_API help - This option enables the user-spaces interface for symmetric - key cipher algorithms. + Enable the userspace interface for symmetric key cipher algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html config CRYPTO_USER_API_RNG - tristate "User-space interface for random number generator algorithms" + tristate "RNG (random number generator) algorithms" depends on NET select CRYPTO_RNG select CRYPTO_USER_API help - This option enables the user-spaces interface for random - number generator algorithms. + Enable the userspace interface for RNG (random number generator) + algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html config CRYPTO_USER_API_RNG_CAVP bool "Enable CAVP testing of DRBG" depends on CRYPTO_USER_API_RNG && CRYPTO_DRBG help - This option enables extra API for CAVP testing via the user-space - interface: resetting of DRBG entropy, and providing Additional Data. + Enable extra APIs in the userspace interface for NIST CAVP + (Cryptographic Algorithm Validation Program) testing: + - resetting DRBG entropy + - providing Additional Data + This should only be enabled for CAVP testing. You should say no unless you know what this is. config CRYPTO_USER_API_AEAD - tristate "User-space interface for AEAD cipher algorithms" + tristate "AEAD cipher algorithms" depends on NET select CRYPTO_AEAD select CRYPTO_SKCIPHER - select CRYPTO_NULL select CRYPTO_USER_API help - This option enables the user-spaces interface for AEAD - cipher algorithms. + Enable the userspace interface for AEAD cipher algorithms. + + See Documentation/crypto/userspace-if.rst and + https://www.chronox.de/libkcapi/html/index.html config CRYPTO_USER_API_ENABLE_OBSOLETE - bool "Enable obsolete cryptographic algorithms for userspace" + bool "Obsolete cryptographic algorithms" depends on CRYPTO_USER_API default y help @@ -1860,24 +1400,41 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE already been phased out from internal use by the kernel, and are only useful for userspace clients that still rely on them. -config CRYPTO_STATS - bool "Crypto usage statistics for User-space" - depends on CRYPTO_USER - help - This option enables the gathering of crypto stats. 
- This will collect: - - encrypt/decrypt size and numbers of symmeric operations - - compress/decompress size and numbers of compress operations - - size and numbers of hash operations - - encrypt/decrypt/sign/verify numbers for asymmetric operations - - generate/seed numbers for rng operations - -config CRYPTO_HASH_INFO - bool +endmenu + +if !KMSAN # avoid false positives from assembly +if ARM +source "arch/arm/crypto/Kconfig" +endif +if ARM64 +source "arch/arm64/crypto/Kconfig" +endif +if LOONGARCH +source "arch/loongarch/crypto/Kconfig" +endif +if MIPS +source "arch/mips/crypto/Kconfig" +endif +if PPC +source "arch/powerpc/crypto/Kconfig" +endif +if RISCV +source "arch/riscv/crypto/Kconfig" +endif +if S390 +source "arch/s390/crypto/Kconfig" +endif +if SPARC +source "arch/sparc/crypto/Kconfig" +endif +if X86 +source "arch/x86/crypto/Kconfig" +endif +endif -source "lib/crypto/Kconfig" source "drivers/crypto/Kconfig" source "crypto/asymmetric_keys/Kconfig" source "certs/Kconfig" +source "crypto/krb5/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index 10526d4559b8..16a35649dd91 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o memneq.o +crypto-y := api.o cipher.o obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o @@ -14,9 +14,16 @@ crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o obj-$(CONFIG_CRYPTO_AEAD2) += aead.o -obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o +obj-$(CONFIG_CRYPTO_GENIV) += geniv.o + +crypto_skcipher-y += lskcipher.o +crypto_skcipher-y += skcipher.o + +obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o +ifeq ($(CONFIG_BPF_SYSCALL),y) +obj-$(CONFIG_CRYPTO_SKCIPHER2) += bpf_crypto_skcipher.o +endif -obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o @@ -25,7 +32,9 @@ crypto_hash-y += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o +obj-$(CONFIG_CRYPTO_SIG2) += sig.o obj-$(CONFIG_CRYPTO_KPP2) += kpp.o +obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o dh_generic-y := dh.o dh_generic-y += dh_helper.o @@ -40,19 +49,14 @@ rsa_generic-y += rsaprivkey.asn1.o rsa_generic-y += rsa.o rsa_generic-y += rsa_helper.o rsa_generic-y += rsa-pkcs1pad.o +rsa_generic-y += rsassa-pkcs1.o obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o -$(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h -$(obj)/sm2.o: $(obj)/sm2signature.asn1.h - -sm2_generic-y += sm2signature.asn1.o -sm2_generic-y += sm2.o - -obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o - $(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h -$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h +$(obj)/ecdsa-x962.o: $(obj)/ecdsasignature.asn1.h ecdsa_generic-y += ecdsa.o +ecdsa_generic-y += ecdsa-x962.o +ecdsa_generic-y += ecdsa-p1363.o ecdsa_generic-y += ecdsasignature.asn1.o obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o @@ -64,37 +68,31 @@ cryptomgr-y := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o obj-$(CONFIG_CRYPTO_USER) += crypto_user.o -crypto_user-y := crypto_user_base.o -crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o obj-$(CONFIG_CRYPTO_CMAC) += cmac.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o -obj-$(CONFIG_CRYPTO_VMAC) += vmac.o obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o -obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o +obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o 
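Of the Makefile changes above, rsa_generic growing rsassa-pkcs1.o is the most self-contained: RSASSA-PKCS1-v1_5 signing is just the RSA private-key operation applied to a fixed padding layout. A sketch of that layout per RFC 8017 section 9.2, shown for SHA-256 with k being the modulus length in bytes (illustrative, not a copy of the kernel's code):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* EM = 0x00 || 0x01 || PS (0xff bytes) || 0x00 || DigestInfo || H */
    static int emsa_pkcs1_v1_5_sha256(u8 *em, size_t k, const u8 *digest)
    {
            static const u8 di[] = {        /* DER DigestInfo header, SHA-256 */
                    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                    0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                    0x00, 0x04, 0x20,
            };
            size_t tlen = sizeof(di) + 32;

            if (k < tlen + 11)              /* PS must be at least 8 bytes */
                    return -EINVAL;

            em[0] = 0x00;
            em[1] = 0x01;
            memset(&em[2], 0xff, k - tlen - 3);
            em[k - tlen - 1] = 0x00;
            memcpy(&em[k - tlen], di, sizeof(di));
            memcpy(&em[k - tlen + sizeof(di)], digest, 32);
            return 0;
    }

Verification recomputes this block and compares it byte-for-byte against the RSA-recovered signature, which is why the hash algorithm has to be pinned as part of the signature algorithm name.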
obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o -obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o -obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o -obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o -obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o -obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o -obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o +obj-$(CONFIG_CRYPTO_SHA1) += sha1.o +obj-$(CONFIG_CRYPTO_SHA256) += sha256.o +obj-$(CONFIG_CRYPTO_SHA512) += sha512.o +obj-$(CONFIG_CRYPTO_SHA3) += sha3.o +obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 -obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o -obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o -obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o +obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o -obj-$(CONFIG_CRYPTO_CFB) += cfb.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_CTS) += cts.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o -obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o +obj-$(CONFIG_CRYPTO_XCTR) += xctr.o +obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o obj-$(CONFIG_CRYPTO_GCM) += gcm.o @@ -120,6 +118,8 @@ CFLAGS_aegis128-neon-inner.o += $(aegis128-cflags-y) CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o endif +# Enable <arm_neon.h> +CFLAGS_aegis128-neon-inner.o += -isystem $(shell $(CC) -print-file-name=include) obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o @@ -133,7 +133,8 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 -obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o +obj-$(CONFIG_CRYPTO_SM4) += sm4.o +obj-$(CONFIG_CRYPTO_SM4_GENERIC) += sm4_generic.o obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o @@ -144,28 +145,31 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o -obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o -obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o +obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o +obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o +CFLAGS_chacha.o += -DARCH=$(ARCH) obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o -obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o -obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o -obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o +obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o +crc32c-cryptoapi-y := crc32c.o +obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o +crc32-cryptoapi-y := crc32.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o +obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o obj-$(CONFIG_CRYPTO_LZ4) += lz4.o obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o 
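The lzo.o, lz4.o, and lz4hc.o objects listed here are thin crypto-API adapters over the lib/ implementations that the corresponding Kconfig entries select. For reference, the underlying library contract that lz4.o wraps looks roughly like the sketch below; buffer sizing and error mapping are the caller's business, and the kernel flavour of LZ4_compress_default() takes an explicit workspace:

    #include <linux/errno.h>
    #include <linux/lz4.h>
    #include <linux/vmalloc.h>

    /* Compress slen bytes of src into dst (capacity dmax).
     * Returns the compressed length, or a negative errno. */
    static int example_lz4_compress(const char *src, int slen,
                                    char *dst, int dmax)
    {
            void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
            int clen;

            if (!wrkmem)
                    return -ENOMEM;

            clen = LZ4_compress_default(src, dst, slen, dmax, wrkmem);
            vfree(wrkmem);

            return clen > 0 ? clen : -ENOSPC;   /* 0 means dst too small */
    }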
obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o obj-$(CONFIG_CRYPTO_842) += 842.o obj-$(CONFIG_CRYPTO_RNG2) += rng.o -obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o obj-$(CONFIG_CRYPTO_DRBG) += drbg.o obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o CFLAGS_jitterentropy.o = -O0 KASAN_SANITIZE_jitterentropy.o = n UBSAN_SANITIZE_jitterentropy.o = n jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o -obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o +obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o +obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o @@ -173,10 +177,8 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o -obj-$(CONFIG_CRYPTO_OFB) += ofb.o obj-$(CONFIG_CRYPTO_ECC) += ecc.o obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o -obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o ecdh_generic-y += ecdh.o ecdh_generic-y += ecdh_helper.o @@ -196,6 +198,14 @@ obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o obj-$(CONFIG_XOR_BLOCKS) += xor.o obj-$(CONFIG_ASYNC_CORE) += async_tx/ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ -obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o crypto_simd-y := simd.o obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o + +# +# Key derivation function +# +obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o + +obj-$(CONFIG_CRYPTO_DF80090A) += df_sp80090a.o + +obj-$(CONFIG_CRYPTO_KRB5) += krb5/ diff --git a/crypto/acompress.c b/crypto/acompress.c index c32c72048a1c..be28cbfd22e3 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -6,25 +6,50 @@ * Authors: Weigang Li <weigang.li@intel.com> * Giovanni Cabiddu <giovanni.cabiddu@intel.com> */ -#include <linux/errno.h> + +#include <crypto/internal/acompress.h> +#include <crypto/scatterwalk.h> +#include <linux/cryptouser.h> +#include <linux/cpumask.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/percpu.h> +#include <linux/scatterlist.h> +#include <linux/sched.h> #include <linux/seq_file.h> -#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/spinlock.h> #include <linux/string.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> -#include <linux/cryptouser.h> -#include <linux/compiler.h> +#include <linux/workqueue.h> #include <net/netlink.h> -#include <crypto/internal/acompress.h> -#include <crypto/internal/scompress.h> -#include "internal.h" + +#include "compress.h" + +struct crypto_scomp; + +enum { + ACOMP_WALK_SLEEP = 1 << 0, + ACOMP_WALK_SRC_LINEAR = 1 << 1, + ACOMP_WALK_DST_LINEAR = 1 << 2, +}; static const struct crypto_type crypto_acomp_type; -#ifdef CONFIG_NET -static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) +static void acomp_reqchain_done(void *data, int err); + +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct acomp_alg, calg.base); +} + +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) +{ + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); +} + +static int __maybe_unused crypto_acomp_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_acomp racomp; @@ -34,12 +59,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, 
sizeof(racomp), &racomp); } -#else -static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -54,29 +73,54 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); struct acomp_alg *alg = crypto_acomp_alg(acomp); - alg->exit(acomp); + if (alg->exit) + alg->exit(acomp); + + if (acomp_is_async(acomp)) + crypto_free_acomp(crypto_acomp_fb(acomp)); } static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) { struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); struct acomp_alg *alg = crypto_acomp_alg(acomp); + struct crypto_acomp *fb = NULL; + int err; if (tfm->__crt_alg->cra_type != &crypto_acomp_type) return crypto_init_scomp_ops_async(tfm); + if (acomp_is_async(acomp)) { + fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + err = -EINVAL; + if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE) + goto out_free_fb; + + tfm->fb = crypto_acomp_tfm(fb); + } + acomp->compress = alg->compress; acomp->decompress = alg->decompress; - acomp->dst_free = alg->dst_free; - acomp->reqsize = alg->reqsize; + acomp->reqsize = alg->base.cra_reqsize; - if (alg->exit) - acomp->base.exit = crypto_acomp_exit_tfm; + acomp->base.exit = crypto_acomp_exit_tfm; + + if (!alg->init) + return 0; - if (alg->init) - return alg->init(acomp); + err = alg->init(acomp); + if (err) + goto out_free_fb; return 0; + +out_free_fb: + crypto_free_acomp(fb); + return err; } static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) @@ -95,11 +139,14 @@ static const struct crypto_type crypto_acomp_type = { #ifdef CONFIG_PROC_FS .show = crypto_acomp_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_acomp_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, .type = CRYPTO_ALG_TYPE_ACOMPRESS, .tfmsize = offsetof(struct crypto_acomp, base), + .algsize = offsetof(struct acomp_alg, base), }; struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, @@ -117,42 +164,161 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); -struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) +static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt) { - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct acomp_req *req; + struct acomp_req_chain *state = &req->chain; + + state->compl = req->base.complete; + state->data = req->base.data; + req->base.complete = cplt; + req->base.data = state; +} - req = __acomp_request_alloc(acomp); - if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type)) - return crypto_acomp_scomp_alloc_ctx(req); +static void acomp_restore_req(struct acomp_req *req) +{ + struct acomp_req_chain *state = req->base.data; - return req; + req->base.complete = state->compl; + req->base.data = state->data; } -EXPORT_SYMBOL_GPL(acomp_request_alloc); -void acomp_request_free(struct acomp_req *req) +static void acomp_reqchain_virt(struct acomp_req *req) { - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); + struct acomp_req_chain *state = &req->chain; + unsigned int slen = req->slen; + unsigned int dlen = req->dlen; + + if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT) + acomp_request_set_src_dma(req, state->src, slen); + if (state->flags & 
CRYPTO_ACOMP_REQ_DST_VIRT) + acomp_request_set_dst_dma(req, state->dst, dlen); +} - if (tfm->__crt_alg->cra_type != &crypto_acomp_type) - crypto_acomp_scomp_free_ctx(req); +static void acomp_virt_to_sg(struct acomp_req *req) +{ + struct acomp_req_chain *state = &req->chain; - if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) { - acomp->dst_free(req->dst); - req->dst = NULL; + state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT); + + if (acomp_request_src_isvirt(req)) { + unsigned int slen = req->slen; + const u8 *svirt = req->svirt; + + state->src = svirt; + sg_init_one(&state->ssg, svirt, slen); + acomp_request_set_src_sg(req, &state->ssg, slen); } - __acomp_request_free(req); + if (acomp_request_dst_isvirt(req)) { + unsigned int dlen = req->dlen; + u8 *dvirt = req->dvirt; + + state->dst = dvirt; + sg_init_one(&state->dsg, dvirt, dlen); + acomp_request_set_dst_sg(req, &state->dsg, dlen); + } } -EXPORT_SYMBOL_GPL(acomp_request_free); -int crypto_register_acomp(struct acomp_alg *alg) +static int acomp_do_nondma(struct acomp_req *req, bool comp) +{ + ACOMP_FBREQ_ON_STACK(fbreq, req); + int err; + + if (comp) + err = crypto_acomp_compress(fbreq); + else + err = crypto_acomp_decompress(fbreq); + + req->dlen = fbreq->dlen; + return err; +} + +static int acomp_do_one_req(struct acomp_req *req, bool comp) +{ + if (acomp_request_isnondma(req)) + return acomp_do_nondma(req, comp); + + acomp_virt_to_sg(req); + return comp ? crypto_acomp_reqtfm(req)->compress(req) : + crypto_acomp_reqtfm(req)->decompress(req); +} + +static int acomp_reqchain_finish(struct acomp_req *req, int err) +{ + acomp_reqchain_virt(req); + acomp_restore_req(req); + return err; +} + +static void acomp_reqchain_done(void *data, int err) +{ + struct acomp_req *req = data; + crypto_completion_t compl; + + compl = req->chain.compl; + data = req->chain.data; + + if (err == -EINPROGRESS) + goto notify; + + err = acomp_reqchain_finish(req, err); + +notify: + compl(data, err); +} + +static int acomp_do_req_chain(struct acomp_req *req, bool comp) +{ + int err; + + acomp_save_req(req, acomp_reqchain_done); + + err = acomp_do_one_req(req, comp); + if (err == -EBUSY || err == -EINPROGRESS) + return err; + + return acomp_reqchain_finish(req, err); +} + +int crypto_acomp_compress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + if (acomp_req_on_stack(req) && acomp_is_async(tfm)) + return -EAGAIN; + if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req)) + return crypto_acomp_reqtfm(req)->compress(req); + return acomp_do_req_chain(req, true); +} +EXPORT_SYMBOL_GPL(crypto_acomp_compress); + +int crypto_acomp_decompress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + if (acomp_req_on_stack(req) && acomp_is_async(tfm)) + return -EAGAIN; + if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req)) + return crypto_acomp_reqtfm(req)->decompress(req); + return acomp_do_req_chain(req, false); +} +EXPORT_SYMBOL_GPL(crypto_acomp_decompress); + +void comp_prepare_alg(struct comp_alg_common *alg) { struct crypto_alg *base = &alg->base; - base->cra_type = &crypto_acomp_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; +} + +int crypto_register_acomp(struct acomp_alg *alg) +{ + struct crypto_alg *base = &alg->calg.base; + + comp_prepare_alg(&alg->calg); + + base->cra_type = &crypto_acomp_type; base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS; return crypto_register_alg(base); @@ -194,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int 
count) } EXPORT_SYMBOL_GPL(crypto_unregister_acomps); +static void acomp_stream_workfn(struct work_struct *work) +{ + struct crypto_acomp_streams *s = + container_of(work, struct crypto_acomp_streams, stream_work); + struct crypto_acomp_stream __percpu *streams = s->streams; + int cpu; + + for_each_cpu(cpu, &s->stream_want) { + struct crypto_acomp_stream *ps; + void *ctx; + + ps = per_cpu_ptr(streams, cpu); + if (ps->ctx) + continue; + + ctx = s->alloc_ctx(); + if (IS_ERR(ctx)) + break; + + spin_lock_bh(&ps->lock); + ps->ctx = ctx; + spin_unlock_bh(&ps->lock); + + cpumask_clear_cpu(cpu, &s->stream_want); + } +} + +void crypto_acomp_free_streams(struct crypto_acomp_streams *s) +{ + struct crypto_acomp_stream __percpu *streams = s->streams; + void (*free_ctx)(void *); + int i; + + s->streams = NULL; + if (!streams) + return; + + cancel_work_sync(&s->stream_work); + free_ctx = s->free_ctx; + + for_each_possible_cpu(i) { + struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i); + + if (!ps->ctx) + continue; + + free_ctx(ps->ctx); + } + + free_percpu(streams); +} +EXPORT_SYMBOL_GPL(crypto_acomp_free_streams); + +int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s) +{ + struct crypto_acomp_stream __percpu *streams; + struct crypto_acomp_stream *ps; + unsigned int i; + void *ctx; + + if (s->streams) + return 0; + + streams = alloc_percpu(struct crypto_acomp_stream); + if (!streams) + return -ENOMEM; + + ctx = s->alloc_ctx(); + if (IS_ERR(ctx)) { + free_percpu(streams); + return PTR_ERR(ctx); + } + + i = cpumask_first(cpu_possible_mask); + ps = per_cpu_ptr(streams, i); + ps->ctx = ctx; + + for_each_possible_cpu(i) { + ps = per_cpu_ptr(streams, i); + spin_lock_init(&ps->lock); + } + + s->streams = streams; + + INIT_WORK(&s->stream_work, acomp_stream_workfn); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams); + +struct crypto_acomp_stream *crypto_acomp_lock_stream_bh( + struct crypto_acomp_streams *s) __acquires(stream) +{ + struct crypto_acomp_stream __percpu *streams = s->streams; + int cpu = raw_smp_processor_id(); + struct crypto_acomp_stream *ps; + + ps = per_cpu_ptr(streams, cpu); + spin_lock_bh(&ps->lock); + if (likely(ps->ctx)) + return ps; + spin_unlock(&ps->lock); + + cpumask_set_cpu(cpu, &s->stream_want); + schedule_work(&s->stream_work); + + ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask)); + spin_lock(&ps->lock); + return ps; +} +EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh); + +void acomp_walk_done_src(struct acomp_walk *walk, int used) +{ + walk->slen -= used; + if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) + scatterwalk_advance(&walk->in, used); + else + scatterwalk_done_src(&walk->in, used); + + if ((walk->flags & ACOMP_WALK_SLEEP)) + cond_resched(); +} +EXPORT_SYMBOL_GPL(acomp_walk_done_src); + +void acomp_walk_done_dst(struct acomp_walk *walk, int used) +{ + walk->dlen -= used; + if ((walk->flags & ACOMP_WALK_DST_LINEAR)) + scatterwalk_advance(&walk->out, used); + else + scatterwalk_done_dst(&walk->out, used); + + if ((walk->flags & ACOMP_WALK_SLEEP)) + cond_resched(); +} +EXPORT_SYMBOL_GPL(acomp_walk_done_dst); + +int acomp_walk_next_src(struct acomp_walk *walk) +{ + unsigned int slen = walk->slen; + unsigned int max = UINT_MAX; + + if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP)) + max = PAGE_SIZE; + if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) { + walk->in.__addr = (void *)(((u8 *)walk->in.sg) + + walk->in.offset); + return min(slen, max); + } + + return slen ? 
scatterwalk_next(&walk->in, slen) : 0; +} +EXPORT_SYMBOL_GPL(acomp_walk_next_src); + +int acomp_walk_next_dst(struct acomp_walk *walk) +{ + unsigned int dlen = walk->dlen; + unsigned int max = UINT_MAX; + + if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP)) + max = PAGE_SIZE; + if ((walk->flags & ACOMP_WALK_DST_LINEAR)) { + walk->out.__addr = (void *)(((u8 *)walk->out.sg) + + walk->out.offset); + return min(dlen, max); + } + + return dlen ? scatterwalk_next(&walk->out, dlen) : 0; +} +EXPORT_SYMBOL_GPL(acomp_walk_next_dst); + +int acomp_walk_virt(struct acomp_walk *__restrict walk, + struct acomp_req *__restrict req, bool atomic) +{ + struct scatterlist *src = req->src; + struct scatterlist *dst = req->dst; + + walk->slen = req->slen; + walk->dlen = req->dlen; + + if (!walk->slen || !walk->dlen) + return -EINVAL; + + walk->flags = 0; + if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) + walk->flags |= ACOMP_WALK_SLEEP; + if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT)) + walk->flags |= ACOMP_WALK_SRC_LINEAR; + if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT)) + walk->flags |= ACOMP_WALK_DST_LINEAR; + + if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) { + walk->in.sg = (void *)req->svirt; + walk->in.offset = 0; + } else + scatterwalk_start(&walk->in, src); + if ((walk->flags & ACOMP_WALK_DST_LINEAR)) { + walk->out.sg = (void *)req->dvirt; + walk->out.offset = 0; + } else + scatterwalk_start(&walk->out, dst); + + return 0; +} +EXPORT_SYMBOL_GPL(acomp_walk_virt); + +struct acomp_req *acomp_request_clone(struct acomp_req *req, + size_t total, gfp_t gfp) +{ + struct acomp_req *nreq; + + nreq = container_of(crypto_request_clone(&req->base, total, gfp), + struct acomp_req, base); + if (nreq == req) + return req; + + if (req->src == &req->chain.ssg) + nreq->src = &nreq->chain.ssg; + if (req->dst == &req->chain.dsg) + nreq->dst = &nreq->chain.dsg; + return nreq; +} +EXPORT_SYMBOL_GPL(acomp_request_clone); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous compression type"); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 84450130cb6b..a6bca877c3c7 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -245,10 +245,9 @@ static void adiantum_hash_header(struct skcipher_request *req) /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */ static int adiantum_hash_message(struct skcipher_request *req, - struct scatterlist *sgl, le128 *digest) + struct scatterlist *sgl, unsigned int nents, + le128 *digest) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; struct shash_desc *hash_desc = &rctx->u.hash_desc; @@ -256,14 +255,11 @@ static int adiantum_hash_message(struct skcipher_request *req, unsigned int i, n; int err; - hash_desc->tfm = tctx->hash; - err = crypto_shash_init(hash_desc); if (err) return err; - sg_miter_start(&miter, sgl, sg_nents(sgl), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); + sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC); for (i = 0; i < bulk_len; i += n) { sg_miter_next(&miter); n = min_t(unsigned int, miter.length, bulk_len - i); @@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req) const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - 
BLOCKCIPHER_BLOCK_SIZE; + struct scatterlist *dst = req->dst; + const unsigned int dst_nents = sg_nents(dst); le128 digest; int err; @@ -298,20 +296,38 @@ static int adiantum_finish(struct skcipher_request *req) * enc: C_R = C_M - H_{K_H}(T, C_L) * dec: P_R = P_M - H_{K_H}(T, P_L) */ - err = adiantum_hash_message(req, req->dst, &digest); - if (err) - return err; - le128_add(&digest, &digest, &rctx->header_hash); - le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); - scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst, - bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1); + rctx->u.hash_desc.tfm = tctx->hash; + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); + if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) { + /* Fast path for single-page destination */ + struct page *page = sg_page(dst); + void *virt = kmap_local_page(page) + dst->offset; + + err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, + (u8 *)&digest); + if (err) { + kunmap_local(virt); + return err; + } + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); + memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128)); + flush_dcache_page(page); + kunmap_local(virt); + } else { + /* Slow path that works for any destination scatterlist */ + err = adiantum_hash_message(req, dst, dst_nents, &digest); + if (err) + return err; + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); + scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst, + bulk_len, sizeof(le128), 1); + } return 0; } -static void adiantum_streamcipher_done(struct crypto_async_request *areq, - int err) +static void adiantum_streamcipher_done(void *data, int err) { - struct skcipher_request *req = areq->data; + struct skcipher_request *req = data; if (!err) err = adiantum_finish(req); @@ -325,6 +341,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc) const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct scatterlist *src = req->src; + const unsigned int src_nents = sg_nents(src); unsigned int stream_len; le128 digest; int err; @@ -340,12 +358,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc) * dec: C_M = C_R + H_{K_H}(T, C_L) */ adiantum_hash_header(req); - err = adiantum_hash_message(req, req->src, &digest); + rctx->u.hash_desc.tfm = tctx->hash; + if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) { + /* Fast path for single-page source */ + void *virt = kmap_local_page(sg_page(src)) + src->offset; + + err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, + (u8 *)&digest); + memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128)); + kunmap_local(virt); + } else { + /* Slow path that works for any source scatterlist */ + err = adiantum_hash_message(req, src, src_nents, &digest); + scatterwalk_map_and_copy(&rctx->rbuf.bignum, src, + bulk_len, sizeof(le128), 0); + } if (err) return err; - le128_add(&digest, &digest, &rctx->header_hash); - scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src, - bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0); + le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); /* If encrypting, encrypt P_M with the block cipher to get C_M */ @@ -469,7 +499,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst) * Check for a supported set of inner algorithms. 
* See the comment at the beginning of this file. */ -static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, +static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg, struct crypto_alg *blockcipher_alg, struct shash_alg *hash_alg) { @@ -495,7 +525,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) const char *nhpoly1305_name; struct skcipher_instance *inst; struct adiantum_instance_ctx *ictx; - struct skcipher_alg *streamcipher_alg; + struct skcipher_alg_common *streamcipher_alg; struct crypto_alg *blockcipher_alg; struct shash_alg *hash_alg; int err; @@ -515,7 +545,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; - streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); + streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn); /* Block cipher, e.g. "aes" */ err = crypto_grab_cipher(&ictx->blockcipher_spawn, @@ -562,8 +592,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); - inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | - hash_alg->base.cra_alignmask; + inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask; /* * The block cipher is only invoked once per message, so for long * messages (e.g. sectors for disk encryption) its performance doesn't @@ -579,8 +608,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.decrypt = adiantum_decrypt; inst->alg.init = adiantum_init_tfm; inst->alg.exit = adiantum_exit_tfm; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg); + inst->alg.min_keysize = streamcipher_alg->min_keysize; + inst->alg.max_keysize = streamcipher_alg->max_keysize; inst->alg.ivsize = TWEAK_SIZE; inst->free = adiantum_free_instance; @@ -610,11 +639,11 @@ static void __exit adiantum_module_exit(void) crypto_unregister_template(&adiantum_tmpl); } -subsys_initcall(adiantum_module_init); +module_init(adiantum_module_init); module_exit(adiantum_module_exit); MODULE_DESCRIPTION("Adiantum length-preserving encryption mode"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); MODULE_ALIAS_CRYPTO("adiantum"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/aead.c b/crypto/aead.c index 16991095270d..08d44c5e5c33 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -8,13 +8,15 @@ */ #include <crypto/internal/aead.h> +#include <linux/cryptouser.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/seq_file.h> -#include <linux/cryptouser.h> +#include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> #include "internal.h" @@ -35,8 +37,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -83,36 +84,25 @@ EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); int crypto_aead_encrypt(struct aead_request *req) { 
struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_alg *alg = aead->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; - crypto_stats_get(alg); if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = crypto_aead_alg(aead)->encrypt(req); - crypto_stats_aead_encrypt(cryptlen, alg, ret); - return ret; + return -ENOKEY; + + return crypto_aead_alg(aead)->encrypt(req); } EXPORT_SYMBOL_GPL(crypto_aead_encrypt); int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_alg *alg = aead->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; - crypto_stats_get(alg); if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else if (req->cryptlen < crypto_aead_authsize(aead)) - ret = -EINVAL; - else - ret = crypto_aead_alg(aead)->decrypt(req); - crypto_stats_aead_decrypt(cryptlen, alg, ret); - return ret; + return -ENOKEY; + + if (req->cryptlen < crypto_aead_authsize(aead)) + return -EINVAL; + + return crypto_aead_alg(aead)->decrypt(req); } EXPORT_SYMBOL_GPL(crypto_aead_decrypt); @@ -130,6 +120,7 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm) struct aead_alg *alg = crypto_aead_alg(aead); crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY); + crypto_aead_set_reqsize(aead, crypto_tfm_alg_reqsize(tfm)); aead->authsize = alg->maxauthsize; @@ -142,8 +133,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm) return 0; } -#ifdef CONFIG_NET -static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) +static int __maybe_unused crypto_aead_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_aead raead; struct aead_alg *aead = container_of(alg, struct aead_alg, base); @@ -159,12 +150,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead); } -#else -static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -173,8 +158,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) struct aead_alg *aead = container_of(alg, struct aead_alg, base); seq_printf(m, "type : aead\n"); - seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? - "yes" : "no"); + seq_printf(m, "async : %s\n", + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); @@ -195,11 +180,14 @@ static const struct crypto_type crypto_aead_type = { #ifdef CONFIG_PROC_FS .show = crypto_aead_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_aead_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_AEAD, .tfmsize = offsetof(struct crypto_aead, base), + .algsize = offsetof(struct aead_alg, base), }; int crypto_grab_aead(struct crypto_aead_spawn *spawn, @@ -217,6 +205,31 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alloc_aead); +struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask) +{ + struct crypto_aead *tfm; + + /* Only sync algorithms are allowed. 
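	 * Setting CRYPTO_ALG_ASYNC in the mask while clearing it in the
	 * type (done just below) restricts the lookup to implementations
	 * whose ASYNC flag is clear, so the returned tfm always completes
	 * synchronously. Editorial sketch of a caller, not part of the
	 * patch:
	 *
	 *	struct crypto_sync_aead *tfm =
	 *		crypto_alloc_sync_aead("gcm(aes)", 0, 0);
	 *
	 *	if (IS_ERR(tfm))
	 *		return PTR_ERR(tfm);
	 *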
*/ + mask |= CRYPTO_ALG_ASYNC; + type &= ~(CRYPTO_ALG_ASYNC); + + tfm = crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask); + + if (!IS_ERR(tfm) && WARN_ON(crypto_aead_reqsize(tfm) > MAX_SYNC_AEAD_REQSIZE)) { + crypto_free_aead(tfm); + return ERR_PTR(-EINVAL); + } + + return (struct crypto_sync_aead *)tfm; +} +EXPORT_SYMBOL_GPL(crypto_alloc_sync_aead); + +int crypto_has_aead(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_aead); + static int aead_prepare_alg(struct aead_alg *alg) { struct crypto_alg *base = &alg->base; diff --git a/crypto/aegis-neon.h b/crypto/aegis-neon.h new file mode 100644 index 000000000000..61e5614b45de --- /dev/null +++ b/crypto/aegis-neon.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef _AEGIS_NEON_H +#define _AEGIS_NEON_H + +void crypto_aegis128_init_neon(void *state, const void *key, const void *iv); +void crypto_aegis128_update_neon(void *state, const void *msg); +void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); +void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); +int crypto_aegis128_final_neon(void *state, void *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize); + +#endif diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index c4f1bfa1d04f..ca80d861345d 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, scatterwalk_start(&walk, sg_src); while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int size = scatterwalk_next(&walk, assoclen); + const u8 *src = walk.addr; unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; if (pos + size >= AEGIS_BLOCK_SIZE) { if (pos > 0) { @@ -308,9 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, pos += left; assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); + scatterwalk_done_src(&walk, size); } if (pos > 0) { @@ -323,8 +320,9 @@ static __always_inline int crypto_aegis128_process_crypt(struct aegis_state *state, struct skcipher_walk *walk, void (*crypt)(struct aegis_state *state, - u8 *dst, const u8 *src, - unsigned int size)) + u8 *dst, + const u8 *src, + unsigned int size)) { int err = 0; @@ -515,7 +513,6 @@ static struct aead_alg crypto_aegis128_alg_generic = { .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aegis_ctx), - .base.cra_alignmask = 0, .base.cra_priority = 100, .base.cra_name = "aegis128", .base.cra_driver_name = "aegis128-generic", @@ -534,7 +531,6 @@ static struct aead_alg crypto_aegis128_alg_simd = { .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aegis_ctx), - .base.cra_alignmask = 0, .base.cra_priority = 200, .base.cra_name = "aegis128", .base.cra_driver_name = "aegis128-simd", @@ -570,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void) crypto_unregister_aead(&crypto_aegis128_alg_generic); } -subsys_initcall(crypto_aegis128_module_init); +module_init(crypto_aegis128_module_init); module_exit(crypto_aegis128_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c index 7de485907d81..b6a52a386b22 100644 --- a/crypto/aegis128-neon-inner.c +++ 
b/crypto/aegis128-neon-inner.c @@ -16,6 +16,7 @@ #define AEGIS_BLOCK_SIZE 16 #include <stddef.h> +#include "aegis-neon.h" extern int aegis128_have_aes_insn; diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c index a7856915ec85..b41807e63bd3 100644 --- a/crypto/aegis128-neon.c +++ b/crypto/aegis128-neon.c @@ -4,20 +4,10 @@ */ #include <asm/cpufeature.h> -#include <asm/neon.h> +#include <asm/simd.h> #include "aegis.h" - -void crypto_aegis128_init_neon(void *state, const void *key, const void *iv); -void crypto_aegis128_update_neon(void *state, const void *msg); -void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); -void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); -int crypto_aegis128_final_neon(void *state, void *tag_xor, - unsigned int assoclen, - unsigned int cryptlen, - unsigned int authsize); +#include "aegis-neon.h" int aegis128_have_aes_insn __ro_after_init; @@ -34,32 +24,28 @@ void crypto_aegis128_init_simd(struct aegis_state *state, const union aegis_block *key, const u8 *iv) { - kernel_neon_begin(); - crypto_aegis128_init_neon(state, key, iv); - kernel_neon_end(); + scoped_ksimd() + crypto_aegis128_init_neon(state, key, iv); } void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg) { - kernel_neon_begin(); - crypto_aegis128_update_neon(state, msg); - kernel_neon_end(); + scoped_ksimd() + crypto_aegis128_update_neon(state, msg); } void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { - kernel_neon_begin(); - crypto_aegis128_encrypt_chunk_neon(state, dst, src, size); - kernel_neon_end(); + scoped_ksimd() + crypto_aegis128_encrypt_chunk_neon(state, dst, src, size); } void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { - kernel_neon_begin(); - crypto_aegis128_decrypt_chunk_neon(state, dst, src, size); - kernel_neon_end(); + scoped_ksimd() + crypto_aegis128_decrypt_chunk_neon(state, dst, src, size); } int crypto_aegis128_final_simd(struct aegis_state *state, @@ -68,12 +54,7 @@ int crypto_aegis128_final_simd(struct aegis_state *state, unsigned int cryptlen, unsigned int authsize) { - int ret; - - kernel_neon_begin(); - ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen, - authsize); - kernel_neon_end(); - - return ret; + scoped_ksimd() + return crypto_aegis128_final_neon(state, tag_xor, assoclen, + cryptlen, authsize); } diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 27ab27931813..85d2e78c8ef2 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -48,13 +48,13 @@ */ #include <crypto/aes.h> +#include <crypto/algapi.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> static inline u8 byte(const u32 x, const unsigned n) { @@ -1311,7 +1311,7 @@ static void __exit aes_fini(void) crypto_unregister_alg(&aes_alg); } -subsys_initcall(aes_init); +module_init(aes_init); module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c index 205c2c257d49..a3b342f92fab 100644 --- a/crypto/aes_ti.c +++ b/crypto/aes_ti.c @@ -6,7 +6,7 @@ */ #include <crypto/aes.h> -#include <linux/crypto.h> +#include <crypto/algapi.h> #include <linux/module.h> static int aesti_set_key(struct crypto_tfm *tfm, 
const u8 *in_key, diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 8bd288d2b089..e468714f539d 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -12,6 +12,8 @@ #include <linux/crypto.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/key.h> +#include <linux/key-type.h> #include <linux/list.h> #include <linux/module.h> #include <linux/net.h> @@ -19,18 +21,19 @@ #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/security.h> +#include <linux/string.h> +#include <keys/user-type.h> +#include <keys/trusted-type.h> +#include <keys/encrypted-type.h> struct alg_type_list { const struct af_alg_type *type; struct list_head list; }; -static atomic_long_t alg_memory_allocated; - static struct proto alg_proto = { .name = "ALG", .owner = THIS_MODULE, - .memory_allocated = &alg_memory_allocated, .obj_size = sizeof(struct alg_sock), }; @@ -142,7 +145,7 @@ void af_alg_release_parent(struct sock *sk) } EXPORT_SYMBOL_GPL(af_alg_release_parent); -static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int alg_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; @@ -225,6 +228,132 @@ out: return err; } +#ifdef CONFIG_KEYS + +static const u8 *key_data_ptr_user(const struct key *key, + unsigned int *datalen) +{ + const struct user_key_payload *ukp; + + ukp = user_key_payload_locked(key); + if (IS_ERR_OR_NULL(ukp)) + return ERR_PTR(-EKEYREVOKED); + + *datalen = key->datalen; + + return ukp->data; +} + +static const u8 *key_data_ptr_encrypted(const struct key *key, + unsigned int *datalen) +{ + const struct encrypted_key_payload *ekp; + + ekp = dereference_key_locked(key); + if (IS_ERR_OR_NULL(ekp)) + return ERR_PTR(-EKEYREVOKED); + + *datalen = ekp->decrypted_datalen; + + return ekp->decrypted_data; +} + +static const u8 *key_data_ptr_trusted(const struct key *key, + unsigned int *datalen) +{ + const struct trusted_key_payload *tkp; + + tkp = dereference_key_locked(key); + if (IS_ERR_OR_NULL(tkp)) + return ERR_PTR(-EKEYREVOKED); + + *datalen = tkp->key_len; + + return tkp->key; +} + +static struct key *lookup_key(key_serial_t serial) +{ + key_ref_t key_ref; + + key_ref = lookup_user_key(serial, 0, KEY_NEED_SEARCH); + if (IS_ERR(key_ref)) + return ERR_CAST(key_ref); + + return key_ref_to_ptr(key_ref); +} + +static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval, + unsigned int optlen) +{ + const struct af_alg_type *type = ask->type; + u8 *key_data = NULL; + unsigned int key_datalen; + key_serial_t serial; + struct key *key; + const u8 *ret; + int err; + + if (optlen != sizeof(serial)) + return -EINVAL; + + if (copy_from_sockptr(&serial, optval, optlen)) + return -EFAULT; + + key = lookup_key(serial); + if (IS_ERR(key)) + return PTR_ERR(key); + + down_read(&key->sem); + + ret = ERR_PTR(-ENOPROTOOPT); + if (!strcmp(key->type->name, "user") || + !strcmp(key->type->name, "logon")) { + ret = key_data_ptr_user(key, &key_datalen); + } else if (IS_REACHABLE(CONFIG_ENCRYPTED_KEYS) && + !strcmp(key->type->name, "encrypted")) { + ret = key_data_ptr_encrypted(key, &key_datalen); + } else if (IS_REACHABLE(CONFIG_TRUSTED_KEYS) && + !strcmp(key->type->name, "trusted")) { + ret = key_data_ptr_trusted(key, &key_datalen); + } + + if (IS_ERR(ret)) { + up_read(&key->sem); + key_put(key); + return PTR_ERR(ret); + } + + key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL); + if (!key_data) { + up_read(&key->sem); + key_put(key); + return 
-ENOMEM; + } + + memcpy(key_data, ret, key_datalen); + + up_read(&key->sem); + key_put(key); + + err = type->setkey(ask->private, key_data, key_datalen); + + sock_kzfree_s(&ask->sk, key_data, key_datalen); + + return err; +} + +#else + +static inline int alg_setkey_by_key_serial(struct alg_sock *ask, + sockptr_t optval, + unsigned int optlen) +{ + return -ENOPROTOOPT; +} + +#endif + static int alg_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -245,12 +374,16 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case ALG_SET_KEY: + case ALG_SET_KEY_BY_KEY_SERIAL: if (sock->state == SS_CONNECTED) goto unlock; if (!type->setkey) goto unlock; - err = alg_setkey(sk, optval, optlen); + if (optname == ALG_SET_KEY_BY_KEY_SERIAL) + err = alg_setkey_by_key_serial(ask, optval, optlen); + else + err = alg_setkey(sk, optval, optlen); break; case ALG_SET_AEAD_AUTHSIZE: if (sock->state == SS_CONNECTED) @@ -274,7 +407,8 @@ unlock: return err; } -int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) +int af_alg_accept(struct sock *sk, struct socket *newsock, + struct proto_accept_arg *arg) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type; @@ -289,7 +423,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (!type) goto unlock; - sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern); + sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, arg->kern); err = -ENOMEM; if (!sk2) goto unlock; @@ -335,10 +469,10 @@ unlock: } EXPORT_SYMBOL_GPL(af_alg_accept); -static int alg_accept(struct socket *sock, struct socket *newsock, int flags, - bool kern) +static int alg_accept(struct socket *sock, struct socket *newsock, + struct proto_accept_arg *arg) { - return af_alg_accept(sock->sk, newsock, kern); + return af_alg_accept(sock->sk, newsock, arg); } static const struct proto_ops alg_proto_ops = { @@ -352,7 +486,6 @@ static const struct proto_ops alg_proto_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, @@ -401,50 +534,25 @@ static const struct net_proto_family alg_family = { .owner = THIS_MODULE, }; -int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) -{ - size_t off; - ssize_t n; - int npages, i; - - n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off); - if (n < 0) - return n; - - npages = DIV_ROUND_UP(off + n, PAGE_SIZE); - if (WARN_ON(npages == 0)) - return -EINVAL; - /* Add one extra for linking */ - sg_init_table(sgl->sg, npages + 1); - - for (i = 0, len = n; i < npages; i++) { - int plen = min_t(int, len, PAGE_SIZE - off); - - sg_set_page(sgl->sg + i, sgl->pages[i], plen, off); - - off = 0; - len -= plen; - } - sg_mark_end(sgl->sg + npages - 1); - sgl->npages = npages; - - return n; -} -EXPORT_SYMBOL_GPL(af_alg_make_sg); - static void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new) { - sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); - sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); + sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1); + sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl); } void af_alg_free_sg(struct af_alg_sgl *sgl) { int i; - for (i = 0; i < sgl->npages; i++) - put_page(sgl->pages[i]); + if (sgl->sgt.sgl) { + if (sgl->need_unpin) + for (i = 0; i < sgl->sgt.nents; i++) + 
unpin_user_page(sg_page(&sgl->sgt.sgl[i])); + if (sgl->sgt.sgl != sgl->sgl) + kvfree(sgl->sgt.sgl); + sgl->sgt.sgl = NULL; + } } EXPORT_SYMBOL_GPL(af_alg_free_sg); @@ -740,7 +848,7 @@ void af_alg_wmem_wakeup(struct sock *sk) wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLRDNORM | EPOLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); @@ -807,7 +915,7 @@ static void af_alg_data_wakeup(struct sock *sk) wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | EPOLLRDNORM | EPOLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } @@ -862,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); + if (ctx->write) { + release_sock(sk); + return -EBUSY; + } + ctx->write = true; + if (ctx->init && !ctx->more) { if (ctx->used) { err = -EINVAL; @@ -885,10 +999,10 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, while (size) { struct scatterlist *sg; size_t len = size; - size_t plen; + ssize_t plen; /* use the existing memory in an allocated page */ - if (ctx->merge) { + if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) { sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); sg = sgl->sg + sgl->cur - 1; @@ -911,6 +1025,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, continue; } + ctx->merge = 0; + if (!af_alg_writable(sk)) { err = af_alg_wait_for_wmem(sk, msg->msg_flags); if (err) @@ -930,113 +1046,77 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, if (sgl->cur) sg_unmark_end(sg + sgl->cur - 1); - do { - unsigned int i = sgl->cur; - - plen = min_t(size_t, len, PAGE_SIZE); - - sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); - if (!sg_page(sg + i)) { - err = -ENOMEM; + if (msg->msg_flags & MSG_SPLICE_PAGES) { + struct sg_table sgtable = { + .sgl = sg, + .nents = sgl->cur, + .orig_nents = sgl->cur, + }; + + plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable, + MAX_SGL_ENTS - sgl->cur, 0); + if (plen < 0) { + err = plen; goto unlock; } - err = memcpy_from_msg(page_address(sg_page(sg + i)), - msg, plen); - if (err) { - __free_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - goto unlock; - } - - sg[i].length = plen; + for (; sgl->cur < sgtable.nents; sgl->cur++) + get_page(sg_page(&sg[sgl->cur])); len -= plen; ctx->used += plen; copied += plen; size -= plen; - sgl->cur++; - } while (len && sgl->cur < MAX_SGL_ENTS); - - if (!size) - sg_mark_end(sg + sgl->cur - 1); - - ctx->merge = plen & (PAGE_SIZE - 1); - } - - err = 0; + } else { + do { + struct page *pg; + unsigned int i = sgl->cur; - ctx->more = msg->msg_flags & MSG_MORE; + plen = min_t(size_t, len, PAGE_SIZE); -unlock: - af_alg_data_wakeup(sk); - release_sock(sk); - - return copied ?: err; -} -EXPORT_SYMBOL_GPL(af_alg_sendmsg); + pg = alloc_page(GFP_KERNEL); + if (!pg) { + err = -ENOMEM; + goto unlock; + } -/** - * af_alg_sendpage - sendpage system call handler - * @sock: socket of connection to user space to write to - * @page: data to send - * @offset: offset into page to begin sending - * @size: length of data - * @flags: message send/receive flags - * - * This is a generic implementation of sendpage to fill ctx->tsgl_list. 
- */ -ssize_t af_alg_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct af_alg_ctx *ctx = ask->private; - struct af_alg_tsgl *sgl; - int err = -EINVAL; + sg_assign_page(sg + i, pg); - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; + err = memcpy_from_msg( + page_address(sg_page(sg + i)), + msg, plen); + if (err) { + __free_page(sg_page(sg + i)); + sg_assign_page(sg + i, NULL); + goto unlock; + } - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; + sg[i].length = plen; + len -= plen; + ctx->used += plen; + copied += plen; + size -= plen; + sgl->cur++; + } while (len && sgl->cur < MAX_SGL_ENTS); - if (!size) - goto done; + ctx->merge = plen & (PAGE_SIZE - 1); + } - if (!af_alg_writable(sk)) { - err = af_alg_wait_for_wmem(sk, flags); - if (err) - goto unlock; + if (!size) + sg_mark_end(sg + sgl->cur - 1); } - err = af_alg_alloc_tsgl(sk); - if (err) - goto unlock; - - ctx->merge = 0; - sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); - - if (sgl->cur) - sg_unmark_end(sgl->sg + sgl->cur - 1); - - sg_mark_end(sgl->sg + sgl->cur); - - get_page(page); - sg_set_page(sgl->sg + sgl->cur, page, size, offset); - sgl->cur++; - ctx->used += size; + err = 0; -done: - ctx->more = flags & MSG_MORE; + ctx->more = msg->msg_flags & MSG_MORE; unlock: af_alg_data_wakeup(sk); + ctx->write = false; release_sock(sk); - return err ?: size; + return copied ?: err; } -EXPORT_SYMBOL_GPL(af_alg_sendpage); +EXPORT_SYMBOL_GPL(af_alg_sendmsg); /** * af_alg_free_resources - release resources required for crypto request @@ -1045,15 +1125,19 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage); void af_alg_free_resources(struct af_alg_async_req *areq) { struct sock *sk = areq->sk; + struct af_alg_ctx *ctx; af_alg_free_areq_sgls(areq); sock_kfree_s(sk, areq, areq->areqlen); + + ctx = alg_sk(sk)->private; + ctx->inflight = false; } EXPORT_SYMBOL_GPL(af_alg_free_resources); /** * af_alg_async_cb - AIO callback handler - * @_req: async request info + * @data: async request completion data * @err: if non-zero, error result to be returned via ki_complete(); * otherwise return the AIO output length via ki_complete(). * @@ -1063,9 +1147,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources); * The number of bytes to be generated with the AIO operation must be set * in areq->outlen before the AIO callback handler is invoked. */ -void af_alg_async_cb(struct crypto_async_request *_req, int err) +void af_alg_async_cb(void *data, int err) { - struct af_alg_async_req *areq = _req->data; + struct af_alg_async_req *areq = data; struct sock *sk = areq->sk; struct kiocb *iocb = areq->iocb; unsigned int resultlen; @@ -1076,7 +1160,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) af_alg_free_resources(areq); sock_put(sk); - iocb->ki_complete(iocb, err ? err : (int)resultlen, 0); + iocb->ki_complete(iocb, err ? err : (int)resultlen); } EXPORT_SYMBOL_GPL(af_alg_async_cb); @@ -1117,17 +1201,25 @@ EXPORT_SYMBOL_GPL(af_alg_poll); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen) { - struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); + struct af_alg_ctx *ctx = alg_sk(sk)->private; + struct af_alg_async_req *areq; + + /* Only one AIO request can be in flight. 
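	 * Editorial note, restating the pairing visible in this patch:
	 * af_alg_alloc_areq() sets ctx->inflight (below) and
	 * af_alg_free_resources() clears it (above), so a second AIO
	 * submission on the same socket now fails fast with -EBUSY
	 * instead of racing the first request.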
*/ + if (ctx->inflight) + return ERR_PTR(-EBUSY); + areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); if (unlikely(!areq)) return ERR_PTR(-ENOMEM); + memset(areq, 0, areqlen); + + ctx->inflight = true; + areq->areqlen = areqlen; areq->sk = sk; - areq->last_rsgl = NULL; + areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl; INIT_LIST_HEAD(&areq->rsgl_list); - areq->tsgl = NULL; - areq->tsgl_entries = 0; return areq; } @@ -1155,8 +1247,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, while (maxsize > len && msg_data_left(msg)) { struct af_alg_rsgl *rsgl; + ssize_t err; size_t seglen; - int err; /* limit the amount of readable buffers */ if (!af_alg_readable(sk)) @@ -1173,16 +1265,23 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, return -ENOMEM; } - rsgl->sgl.npages = 0; + rsgl->sgl.need_unpin = + iov_iter_extract_will_pin(&msg->msg_iter); + rsgl->sgl.sgt.sgl = rsgl->sgl.sgl; + rsgl->sgl.sgt.nents = 0; + rsgl->sgl.sgt.orig_nents = 0; list_add_tail(&rsgl->list, &areq->rsgl_list); - /* make one iovec available as scatterlist */ - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); + sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES); + err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt, + ALG_MAX_PAGES, 0); if (err < 0) { rsgl->sg_num_bytes = 0; return err; } + sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1); + /* chain the new scatterlist with previous one */ if (areq->last_rsgl) af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); @@ -1191,7 +1290,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, len += err; atomic_add(err, &ctx->rcvused); rsgl->sg_num_bytes = err; - iov_iter_advance(&msg->msg_iter, err); } *outlen = len; @@ -1226,5 +1324,6 @@ static void __exit af_alg_exit(void) module_init(af_alg_init); module_exit(af_alg_exit); +MODULE_DESCRIPTION("Crypto userspace interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(AF_ALG); diff --git a/crypto/ahash.c b/crypto/ahash.c index c2ca631a111f..66492ae75fcf 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -2,59 +2,82 @@ /* * Asynchronous Cryptographic Hash operations. * - * This is the asynchronous version of hash.c with notification of - * completion via a callback. + * This is the implementation of the ahash (asynchronous hash) API. It differs + * from shash (synchronous hash) in that ahash supports asynchronous operations, + * and it hashes data from scatterlists instead of virtually addressed buffers. + * + * The ahash API provides access to both ahash and shash algorithms. The shash + * API only provides access to shash algorithms. 
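 *
 * Editorial aside, not part of the patch: a minimal sketch of hashing a
 * scatterlist through the ahash API, assuming "sha256" and eliding error
 * handling:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 out[32];
 *
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *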
* * Copyright (c) 2008 Loc Ho <lho@amcc.com> */ -#include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> +#include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/seq_file.h> -#include <linux/cryptouser.h> -#include <linux/compiler.h> +#include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> -#include "internal.h" +#include "hash.h" -static const struct crypto_type crypto_ahash_type; +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e -struct ahash_request_priv { - crypto_completion_t complete; - void *data; - u8 *result; - u32 flags; - void *ubuf[] CRYPTO_MINALIGN_ATTR; -}; +static int ahash_def_finup(struct ahash_request *req); + +static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_AHASH_ALG_BLOCK_ONLY; +} + +static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_AHASH_ALG_FINAL_NONZERO; +} -static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) +static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm) { - return container_of(crypto_hash_alg_common(hash), struct ahash_alg, - halg); + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_ALG_NEED_FALLBACK; +} + +static inline void ahash_op_done(void *data, int err, + int (*finish)(struct ahash_request *, int)) +{ + struct ahash_request *areq = data; + crypto_completion_t compl; + + compl = areq->saved_complete; + data = areq->saved_data; + if (err == -EINPROGRESS) + goto out; + + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + err = finish(areq, err); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + compl(data, err); } static int hash_walk_next(struct crypto_hash_walk *walk) { - unsigned int alignmask = walk->alignmask; unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); - walk->data = kmap_atomic(walk->pg); + walk->data = kmap_local_page(walk->pg); walk->data += offset; - - if (offset & alignmask) { - unsigned int unaligned = alignmask + 1 - (offset & alignmask); - - if (nbytes > unaligned) - nbytes = unaligned; - } - walk->entrylen -= nbytes; return nbytes; } @@ -76,26 +99,37 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) return hash_walk_next(walk); } -int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk) { - unsigned int alignmask = walk->alignmask; + walk->total = req->nbytes; + walk->entrylen = 0; - walk->data -= walk->offset; + if (!walk->total) + return 0; - if (walk->entrylen && (walk->offset & alignmask) && !err) { - unsigned int nbytes; + walk->flags = req->base.flags; - walk->offset = ALIGN(walk->offset, alignmask + 1); - nbytes = min(walk->entrylen, - (unsigned int)(PAGE_SIZE - walk->offset)); - if (nbytes) { - walk->entrylen -= nbytes; - walk->data += walk->offset; - return nbytes; - } + if (ahash_request_isvirt(req)) { + walk->data = req->svirt; + walk->total = 0; + return req->nbytes; } - kunmap_atomic(walk->data); + walk->sg = req->src; + + return hash_walk_new_entry(walk); +} +EXPORT_SYMBOL_GPL(crypto_hash_walk_first); + +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) +{ + if ((walk->flags & 
CRYPTO_AHASH_REQ_VIRT)) + return err; + + walk->data -= walk->offset; + + kunmap_local(walk->data); crypto_yield(walk->flags); if (err) @@ -116,374 +150,630 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) } EXPORT_SYMBOL_GPL(crypto_hash_walk_done); -int crypto_hash_walk_first(struct ahash_request *req, - struct crypto_hash_walk *walk) +/* + * For an ahash tfm that is using an shash algorithm (instead of an ahash + * algorithm), this returns the underlying shash tfm. + */ +static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm) { - walk->total = req->nbytes; - - if (!walk->total) { - walk->entrylen = 0; - return 0; - } + return *(struct crypto_shash **)crypto_ahash_ctx(tfm); +} - walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); - walk->sg = req->src; - walk->flags = req->base.flags; +static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + struct shash_desc *desc = ahash_request_ctx(req); - return hash_walk_new_entry(walk); + desc->tfm = ahash_to_shash(tfm); + return desc; } -EXPORT_SYMBOL_GPL(crypto_hash_walk_first); -static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) { - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int ret; - u8 *buffer, *alignbuffer; - unsigned long absize; + struct crypto_hash_walk walk; + int nbytes; - absize = keylen + alignmask; - buffer = kmalloc(absize, GFP_KERNEL); - if (!buffer) - return -ENOMEM; + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; + nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_shash_update(desc, walk.data, nbytes); - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - ret = tfm->setkey(tfm, alignbuffer, keylen); - kfree_sensitive(buffer); - return ret; + return nbytes; } +EXPORT_SYMBOL_GPL(shash_ahash_update); -static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) { - return -ENOSYS; + struct crypto_hash_walk walk; + int nbytes; + + nbytes = crypto_hash_walk_first(req, &walk); + if (!nbytes) + return crypto_shash_final(desc, req->result); + + do { + nbytes = crypto_hash_walk_last(&walk) ? 
+ crypto_shash_finup(desc, walk.data, nbytes, + req->result) : + crypto_shash_update(desc, walk.data, nbytes); + nbytes = crypto_hash_walk_done(&walk, nbytes); + } while (nbytes > 0); + + return nbytes; } +EXPORT_SYMBOL_GPL(shash_ahash_finup); -static void ahash_set_needkey(struct crypto_ahash *tfm) +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) { - const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + unsigned int nbytes = req->nbytes; + struct scatterlist *sg; + unsigned int offset; + struct page *page; + const u8 *data; + int err; - if (tfm->setkey != ahash_nosetkey && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) - crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + data = req->svirt; + if (!nbytes || ahash_request_isvirt(req)) + return crypto_shash_digest(desc, data, nbytes, req->result); + + sg = req->src; + if (nbytes > sg->length) + return crypto_shash_init(desc) ?: + shash_ahash_finup(req, desc); + + page = sg_page(sg); + offset = sg->offset; + data = lowmem_page_address(page) + offset; + if (!IS_ENABLED(CONFIG_HIGHMEM)) + return crypto_shash_digest(desc, data, nbytes, req->result); + + page += offset >> PAGE_SHIFT; + offset = offset_in_page(offset); + + if (nbytes > (unsigned int)PAGE_SIZE - offset) + return crypto_shash_init(desc) ?: + shash_ahash_finup(req, desc); + + data = kmap_local_page(page); + err = crypto_shash_digest(desc, data + offset, nbytes, + req->result); + kunmap_local(data); + return err; } +EXPORT_SYMBOL_GPL(shash_ahash_digest); -int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) +static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm) { - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int err; + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - if ((unsigned long)key & alignmask) - err = ahash_setkey_unaligned(tfm, key, keylen); - else - err = tfm->setkey(tfm, key, keylen); + crypto_free_shash(*ctx); +} - if (unlikely(err)) { - ahash_set_needkey(tfm); - return err; +static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_ahash *crt = __crypto_ahash_cast(tfm); + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *shash; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + shash = crypto_create_tfm(calg, &crypto_shash_type); + if (IS_ERR(shash)) { + crypto_mod_put(calg); + return PTR_ERR(shash); } - crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + crt->using_shash = true; + *ctx = shash; + tfm->exit = crypto_exit_ahash_using_shash; + + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); + return 0; } -EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -static inline unsigned int ahash_align_buffer_size(unsigned len, - unsigned long mask) +static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) { - return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); + return -ENOSYS; } -static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) +static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); - unsigned int ds = crypto_ahash_digestsize(tfm); - struct ahash_request_priv *priv; - - priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
- GFP_KERNEL : GFP_ATOMIC); - if (!priv) - return -ENOMEM; - - /* - * WARNING: Voodoo programming below! - * - * The code below is obscure and hard to understand, thus explanation - * is necessary. See include/crypto/hash.h and include/linux/crypto.h - * to understand the layout of structures used here! - * - * The code here will replace portions of the ORIGINAL request with - * pointers to new code and buffers so the hashing operation can store - * the result in aligned buffer. We will call the modified request - * an ADJUSTED request. - * - * The newly mangled request will look as such: - * - * req { - * .result = ADJUSTED[new aligned buffer] - * .base.complete = ADJUSTED[pointer to completion function] - * .base.data = ADJUSTED[*req (pointer to self)] - * .priv = ADJUSTED[new priv] { - * .result = ORIGINAL(result) - * .complete = ORIGINAL(base.complete) - * .data = ORIGINAL(base.data) - * } - */ - - priv->result = req->result; - priv->complete = req->base.complete; - priv->data = req->base.data; - priv->flags = req->base.flags; - - /* - * WARNING: We do not backup req->priv here! The req->priv - * is for internal use of the Crypto API and the - * user must _NOT_ _EVER_ depend on it's content! - */ - - req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); - req->base.complete = cplt; - req->base.data = req; - req->priv = priv; + if (alg->setkey != ahash_nosetkey && + !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + if (likely(tfm->using_shash)) { + struct crypto_shash *shash = ahash_to_shash(tfm); + int err; + + err = crypto_shash_setkey(shash, key, keylen); + if (unlikely(err)) { + crypto_ahash_set_flags(tfm, + crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); + return err; + } + } else { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + int err; + + err = alg->setkey(tfm, key, keylen); + if (!err && crypto_ahash_need_fallback(tfm)) + err = crypto_ahash_setkey(crypto_ahash_fb(tfm), + key, keylen); + if (unlikely(err)) { + ahash_set_needkey(tfm, alg); + return err; + } + } + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } +EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -static void ahash_restore_req(struct ahash_request *req, int err) +static int ahash_do_req_chain(struct ahash_request *req, + int (*const *op)(struct ahash_request *req)) { - struct ahash_request_priv *priv = req->priv; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int err; + + if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req)) + return (*op)(req); + + if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE) + return -ENOSYS; - if (!err) - memcpy(priv->result, req->result, - crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); + if (!crypto_ahash_need_fallback(tfm)) + return -ENOSYS; - /* Restore the original crypto request. */ - req->result = priv->result; + if (crypto_hash_no_export_core(tfm)) + return -ENOSYS; - ahash_request_set_callback(req, priv->flags, - priv->complete, priv->data); - req->priv = NULL; + { + u8 state[HASH_MAX_STATESIZE]; - /* Free the req->priv.priv from the ADJUSTED request. 
*/ - kfree_sensitive(priv); + if (op == &crypto_ahash_alg(tfm)->digest) { + ahash_request_set_tfm(req, crypto_ahash_fb(tfm)); + err = crypto_ahash_digest(req); + goto out_no_state; + } + + err = crypto_ahash_export(req, state); + ahash_request_set_tfm(req, crypto_ahash_fb(tfm)); + err = err ?: crypto_ahash_import(req, state); + + if (op == &crypto_ahash_alg(tfm)->finup) { + err = err ?: crypto_ahash_finup(req); + goto out_no_state; + } + + err = err ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, state); + + ahash_request_set_tfm(req, tfm); + return err ?: crypto_ahash_import(req, state); + +out_no_state: + ahash_request_set_tfm(req, tfm); + return err; + } } -static void ahash_notify_einprogress(struct ahash_request *req) +int crypto_ahash_init(struct ahash_request *req) { - struct ahash_request_priv *priv = req->priv; - struct crypto_async_request oreq; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - oreq.data = priv->data; + if (likely(tfm->using_shash)) + return crypto_shash_init(prepare_shash_desc(req, tfm)); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (crypto_ahash_block_only(tfm)) { + u8 *buf = ahash_request_ctx(req); + + buf += crypto_ahash_reqsize(tfm) - 1; + *buf = 0; + } + return crypto_ahash_alg(tfm)->init(req); +} +EXPORT_SYMBOL_GPL(crypto_ahash_init); - priv->complete(&oreq, -EINPROGRESS); +static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) +{ + req->saved_complete = req->base.complete; + req->saved_data = req->base.data; + req->base.complete = cplt; + req->base.data = req; } -static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) +static void ahash_restore_req(struct ahash_request *req) { - struct ahash_request *areq = req->data; + req->base.complete = req->saved_complete; + req->base.data = req->saved_data; +} - if (err == -EINPROGRESS) { - ahash_notify_einprogress(areq); - return; +static int ahash_update_finish(struct ahash_request *req, int err) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool nonzero = crypto_ahash_final_nonzero(tfm); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen; + u8 *buf; + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen) { + req->src = req->sg_head + 1; + if (sg_is_chain(req->src)) + req->src = sg_chain_ptr(req->src); } - /* - * Restore the original request, see ahash_op_unaligned() for what - * goes where. - * - * The "struct ahash_request *req" here is in fact the "req.base" - * from the ADJUSTED request from ahash_op_unaligned(), thus as it - * is a pointer to self, it is also the ADJUSTED "req" . - */ + req->nbytes += nonzero - blen; - /* First copy req->result into req->priv.result */ - ahash_restore_req(areq, err); + blen = 0; + if (err >= 0) { + blen = err + nonzero; + err = 0; + } + if (ahash_request_isvirt(req)) + memcpy(buf, req->svirt + req->nbytes - blen, blen); + else + memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen); + *blenp = blen; - /* Complete the ORIGINAL request. 
*/ - areq->base.complete(&areq->base, err); + ahash_restore_req(req); + + return err; } -static int ahash_op_unaligned(struct ahash_request *req, - int (*op)(struct ahash_request *)) +static void ahash_update_done(void *data, int err) { - int err; + ahash_op_done(data, err, ahash_update_finish); +} - err = ahash_save_req(req, ahash_op_unaligned_done); - if (err) - return err; +int crypto_ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool nonzero = crypto_ahash_final_nonzero(tfm); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen, err; + u8 *buf; + + if (likely(tfm->using_shash)) + return shash_ahash_update(req, ahash_request_ctx(req)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (!crypto_ahash_block_only(tfm)) + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update); + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen + req->nbytes < bs + nonzero) { + if (ahash_request_isvirt(req)) + memcpy(buf + blen, req->svirt, req->nbytes); + else + memcpy_from_sglist(buf + blen, req->src, 0, + req->nbytes); + + *blenp += req->nbytes; + return 0; + } - err = op(req); + if (blen) { + memset(req->sg_head, 0, sizeof(req->sg_head[0])); + sg_set_buf(req->sg_head, buf, blen); + if (req->src != req->sg_head + 1) + sg_chain(req->sg_head, 2, req->src); + req->src = req->sg_head; + req->nbytes += blen; + } + req->nbytes -= nonzero; + + ahash_save_req(req, ahash_update_done); + + err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update); if (err == -EINPROGRESS || err == -EBUSY) return err; - ahash_restore_req(req, err); - - return err; + return ahash_update_finish(req, err); } +EXPORT_SYMBOL_GPL(crypto_ahash_update); -static int crypto_ahash_op(struct ahash_request *req, - int (*op)(struct ahash_request *)) +static int ahash_finup_finish(struct ahash_request *req, int err) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen; + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + + if (blen) { + if (sg_is_last(req->src)) + req->src = NULL; + else { + req->src = req->sg_head + 1; + if (sg_is_chain(req->src)) + req->src = sg_chain_ptr(req->src); + } + req->nbytes -= blen; + } - if ((unsigned long)req->result & alignmask) - return ahash_op_unaligned(req, op); + ahash_restore_req(req); - return op(req); + return err; } -int crypto_ahash_final(struct ahash_request *req) +static void ahash_finup_done(void *data, int err) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int nbytes = req->nbytes; - int ret; - - crypto_stats_get(alg); - ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); - crypto_stats_ahash_final(nbytes, ret, alg); - return ret; + ahash_op_done(data, err, ahash_finup_finish); } -EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int nbytes = req->nbytes; - int ret; + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen, err; + u8 *buf; + + if (likely(tfm->using_shash)) + return shash_ahash_finup(req, ahash_request_ctx(req)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (!crypto_ahash_alg(tfm)->finup) + return 
ahash_def_finup(req); + if (!crypto_ahash_block_only(tfm)) + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup); + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen) { + memset(req->sg_head, 0, sizeof(req->sg_head[0])); + sg_set_buf(req->sg_head, buf, blen); + if (!req->src) + sg_mark_end(req->sg_head); + else if (req->src != req->sg_head + 1) + sg_chain(req->sg_head, 2, req->src); + req->src = req->sg_head; + req->nbytes += blen; + } - crypto_stats_get(alg); - ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); - crypto_stats_ahash_final(nbytes, ret, alg); - return ret; + ahash_save_req(req, ahash_finup_done); + + err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup); + if (err == -EINPROGRESS || err == -EBUSY) + return err; + + return ahash_finup_finish(req, err); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int nbytes = req->nbytes; - int ret; - crypto_stats_get(alg); + if (likely(tfm->using_shash)) + return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = crypto_ahash_op(req, tfm->digest); - crypto_stats_ahash_final(nbytes, ret, alg); - return ret; + return -ENOKEY; + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); -static void ahash_def_finup_done2(struct crypto_async_request *req, int err) +static void ahash_def_finup_done2(void *data, int err) { - struct ahash_request *areq = req->data; + struct ahash_request *areq = data; if (err == -EINPROGRESS) return; - ahash_restore_req(areq, err); - - areq->base.complete(&areq->base, err); + ahash_restore_req(areq); + ahash_request_complete(areq, err); } static int ahash_def_finup_finish1(struct ahash_request *req, int err) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + if (err) goto out; req->base.complete = ahash_def_finup_done2; - err = crypto_ahash_reqtfm(req)->final(req); + err = crypto_ahash_alg(tfm)->final(req); if (err == -EINPROGRESS || err == -EBUSY) return err; out: - ahash_restore_req(req, err); + ahash_restore_req(req); return err; } -static void ahash_def_finup_done1(struct crypto_async_request *req, int err) +static void ahash_def_finup_done1(void *data, int err) { - struct ahash_request *areq = req->data; - - if (err == -EINPROGRESS) { - ahash_notify_einprogress(areq); - return; - } - - areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - err = ahash_def_finup_finish1(areq, err); - if (areq->priv) - return; - - areq->base.complete(&areq->base, err); + ahash_op_done(data, err, ahash_def_finup_finish1); } static int ahash_def_finup(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int err; - err = ahash_save_req(req, ahash_def_finup_done1); - if (err) - return err; + ahash_save_req(req, ahash_def_finup_done1); - err = tfm->update(req); + err = crypto_ahash_update(req); if (err == -EINPROGRESS || err == -EBUSY) return err; return ahash_def_finup_finish1(req, err); } +int crypto_ahash_export_core(struct ahash_request *req, void *out) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_export_core(ahash_request_ctx(req), out); + return crypto_ahash_alg(tfm)->export_core(req, out); +} 
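/*
 * Editorial aside, not part of the patch: the export/import helpers here
 * serve the usual suspend/resume pattern, sketched with error handling
 * elided and HASH_MAX_STATESIZE as the assumed bound:
 *
 *	u8 state[HASH_MAX_STATESIZE];
 *
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);		// hash the first chunk
 *	crypto_ahash_export(req, state);	// save the partial state
 *	...
 *	crypto_ahash_import(req2, state);	// resume in another request
 *	crypto_ahash_finup(req2);		// last chunk plus final digest
 */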
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core); + +int crypto_ahash_export(struct ahash_request *req, void *out) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_export(ahash_request_ctx(req), out); + if (crypto_ahash_block_only(tfm)) { + unsigned int plen = crypto_ahash_blocksize(tfm) + 1; + unsigned int reqsize = crypto_ahash_reqsize(tfm); + unsigned int ss = crypto_ahash_statesize(tfm); + u8 *buf = ahash_request_ctx(req); + + memcpy(out + ss - plen, buf + reqsize - plen, plen); + } + return crypto_ahash_alg(tfm)->export(req, out); +} +EXPORT_SYMBOL_GPL(crypto_ahash_export); + +int crypto_ahash_import_core(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_import_core(prepare_shash_desc(req, tfm), + in); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (crypto_ahash_block_only(tfm)) { + unsigned int reqsize = crypto_ahash_reqsize(tfm); + u8 *buf = ahash_request_ctx(req); + + buf[reqsize - 1] = 0; + } + return crypto_ahash_alg(tfm)->import_core(req, in); +} +EXPORT_SYMBOL_GPL(crypto_ahash_import_core); + +int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_import(prepare_shash_desc(req, tfm), in); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (crypto_ahash_block_only(tfm)) { + unsigned int plen = crypto_ahash_blocksize(tfm) + 1; + unsigned int reqsize = crypto_ahash_reqsize(tfm); + unsigned int ss = crypto_ahash_statesize(tfm); + u8 *buf = ahash_request_ctx(req); + + memcpy(buf + reqsize - plen, in + ss - plen, plen); + if (buf[reqsize - 1] >= plen) + return -EOVERFLOW; + } + return crypto_ahash_alg(tfm)->import(req, in); +} +EXPORT_SYMBOL_GPL(crypto_ahash_import); + static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); - alg->exit_tfm(hash); + if (alg->exit_tfm) + alg->exit_tfm(hash); + else if (tfm->__crt_alg->cra_exit) + tfm->__crt_alg->cra_exit(tfm); + + if (crypto_ahash_need_fallback(hash)) + crypto_free_ahash(crypto_ahash_fb(hash)); } static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); + struct crypto_ahash *fb = NULL; + int err; - hash->setkey = ahash_nosetkey; + crypto_ahash_set_statesize(hash, alg->halg.statesize); + crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm)); - if (tfm->__crt_alg->cra_type != &crypto_ahash_type) - return crypto_init_shash_ops_async(tfm); + if (tfm->__crt_alg->cra_type == &crypto_shash_type) + return crypto_init_ahash_using_shash(tfm); - hash->init = alg->init; - hash->update = alg->update; - hash->final = alg->final; - hash->finup = alg->finup ?: ahash_def_finup; - hash->digest = alg->digest; - hash->export = alg->export; - hash->import = alg->import; + if (crypto_ahash_need_fallback(hash)) { + fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash), + CRYPTO_ALG_REQ_VIRT, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_REQ_VIRT | + CRYPTO_AHASH_ALG_NO_EXPORT_CORE); + if (IS_ERR(fb)) + return PTR_ERR(fb); - if (alg->setkey) { - hash->setkey = alg->setkey; - ahash_set_needkey(hash); + tfm->fb = crypto_ahash_tfm(fb); } - if (alg->exit_tfm) - tfm->exit = crypto_ahash_exit_tfm; + 
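	/*
	 * Editorial aside, not part of the patch: the fallback requested
	 * above is a synchronous implementation that accepts virtually
	 * addressed data; ahash_do_req_chain() swaps it in when the
	 * native driver cannot take such a request, condensed:
	 *
	 *	ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
	 *	err = crypto_ahash_digest(req);
	 *	ahash_request_set_tfm(req, tfm);
	 */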
ahash_set_needkey(hash, alg); + + tfm->exit = crypto_ahash_exit_tfm; + + if (alg->init_tfm) + err = alg->init_tfm(hash); + else if (tfm->__crt_alg->cra_init) + err = tfm->__crt_alg->cra_init(tfm); + else + return 0; - return alg->init_tfm ? alg->init_tfm(hash) : 0; + if (err) + goto out_free_sync_hash; + + if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) > + MAX_SYNC_HASH_REQSIZE) + goto out_exit_tfm; + + BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE); + if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE) + crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE); + + return 0; + +out_exit_tfm: + if (alg->exit_tfm) + alg->exit_tfm(hash); + else if (tfm->__crt_alg->cra_exit) + tfm->__crt_alg->cra_exit(tfm); + err = -EINVAL; +out_free_sync_hash: + crypto_free_ahash(fb); + return err; } static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) { - if (alg->cra_type != &crypto_ahash_type) + if (alg->cra_type == &crypto_shash_type) return sizeof(struct crypto_shash *); return crypto_alg_extsize(alg); @@ -496,8 +786,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst) ahash->free(ahash); } -#ifdef CONFIG_NET -static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) +static int __maybe_unused crypto_ahash_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_hash rhash; @@ -510,20 +800,14 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash); } -#else -static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : ahash\n"); - seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
- "yes" : "no"); + seq_printf(m, "async : %s\n", + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "digestsize : %u\n", __crypto_hash_alg_common(alg)->digestsize); @@ -536,11 +820,14 @@ static const struct crypto_type crypto_ahash_type = { #ifdef CONFIG_PROC_FS .show = crypto_ahash_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_ahash_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), + .algsize = offsetof(struct ahash_alg, halg.base), }; int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, @@ -565,19 +852,146 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_ahash); +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +{ + struct crypto_alg *alg = &halg->base; + + if (alg->cra_type == &crypto_shash_type) + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + + return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; +} +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); + +struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) +{ + struct hash_alg_common *halg = crypto_hash_alg_common(hash); + struct crypto_tfm *tfm = crypto_ahash_tfm(hash); + struct crypto_ahash *fb = NULL; + struct crypto_ahash *nhash; + struct ahash_alg *alg; + int err; + + if (!crypto_hash_alg_has_setkey(halg)) { + tfm = crypto_tfm_get(tfm); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + return hash; + } + + nhash = crypto_clone_tfm(&crypto_ahash_type, tfm); + + if (IS_ERR(nhash)) + return nhash; + + nhash->reqsize = hash->reqsize; + nhash->statesize = hash->statesize; + + if (likely(hash->using_shash)) { + struct crypto_shash **nctx = crypto_ahash_ctx(nhash); + struct crypto_shash *shash; + + shash = crypto_clone_shash(ahash_to_shash(hash)); + if (IS_ERR(shash)) { + err = PTR_ERR(shash); + goto out_free_nhash; + } + crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash; + nhash->using_shash = true; + *nctx = shash; + return nhash; + } + + if (crypto_ahash_need_fallback(hash)) { + fb = crypto_clone_ahash(crypto_ahash_fb(hash)); + err = PTR_ERR(fb); + if (IS_ERR(fb)) + goto out_free_nhash; + + crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb); + } + + err = -ENOSYS; + alg = crypto_ahash_alg(hash); + if (!alg->clone_tfm) + goto out_free_fb; + + err = alg->clone_tfm(nhash, hash); + if (err) + goto out_free_fb; + + crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm; + + return nhash; + +out_free_fb: + crypto_free_ahash(fb); +out_free_nhash: + crypto_free_ahash(nhash); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(crypto_clone_ahash); + +static int ahash_default_export_core(struct ahash_request *req, void *out) +{ + return -ENOSYS; +} + +static int ahash_default_import_core(struct ahash_request *req, const void *in) +{ + return -ENOSYS; +} + static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; + int err; + + if (alg->halg.statesize == 0) + return -EINVAL; - if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE || - alg->halg.statesize > HASH_MAX_STATESIZE || - alg->halg.statesize == 0) + if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize) return -EINVAL; + if (!(base->cra_flags & CRYPTO_ALG_ASYNC) && + base->cra_reqsize > MAX_SYNC_HASH_REQSIZE) + return -EINVAL; + + if (base->cra_flags & CRYPTO_ALG_NEED_FALLBACK && + base->cra_flags & CRYPTO_ALG_NO_FALLBACK) + return -EINVAL; + + 
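	/*
	 * Editorial summary, not part of the patch. The checks above
	 * encode the registration contract for an ahash driver:
	 *
	 *	halg.statesize must be nonzero;
	 *	a nonzero cra_reqsize must be at least halg.statesize;
	 *	a sync (!CRYPTO_ALG_ASYNC) algorithm must keep cra_reqsize
	 *	within MAX_SYNC_HASH_REQSIZE;
	 *	CRYPTO_ALG_NEED_FALLBACK and CRYPTO_ALG_NO_FALLBACK are
	 *	mutually exclusive.
	 */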
err = hash_prepare_alg(&alg->halg); + if (err) + return err; + base->cra_type = &crypto_ahash_type; - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) & + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT) && + !(base->cra_flags & CRYPTO_ALG_NO_FALLBACK)) + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + + if (!alg->setkey) + alg->setkey = ahash_nosetkey; + + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + if (!alg->finup) + return -EINVAL; + + base->cra_reqsize += base->cra_blocksize + 1; + alg->halg.statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = ahash_default_export_core; + alg->import_core = ahash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + return 0; } @@ -645,16 +1059,42 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); -bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +void ahash_request_free(struct ahash_request *req) { - struct crypto_alg *alg = &halg->base; + if (unlikely(!req)) + return; - if (alg->cra_type != &crypto_ahash_type) - return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + if (!ahash_req_on_stack(req)) { + kfree(req); + return; + } - return __crypto_ahash_alg(alg)->setkey != NULL; + ahash_request_zero(req); } -EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); +EXPORT_SYMBOL_GPL(ahash_request_free); + +int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, + unsigned int len, u8 *out) +{ + HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm)); + int err; + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, data, out, len); + err = crypto_ahash_digest(req); + + ahash_request_zero(req); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_hash_digest); + +void ahash_free_singlespawn_instance(struct ahash_instance *inst) +{ + crypto_drop_spawn(ahash_instance_ctx(inst)); + kfree(inst); +} +EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index f866085c8a4a..a36f50c83827 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -5,23 +5,36 @@ * Copyright (c) 2015, Intel Corporation * Authors: Tadeusz Struk <tadeusz.struk@intel.com> */ +#include <crypto/internal/akcipher.h> +#include <linux/cryptouser.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/crypto.h> -#include <linux/compiler.h> -#include <crypto/algapi.h> -#include <linux/cryptouser.h> #include <net/netlink.h> -#include <crypto/akcipher.h> -#include <crypto/internal/akcipher.h> + #include "internal.h" -#ifdef CONFIG_NET -static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e + +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + +static int __maybe_unused crypto_akcipher_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_akcipher rakcipher; @@ -32,12 
+45,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rakcipher), &rakcipher); } -#else -static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -83,11 +90,14 @@ static const struct crypto_type crypto_akcipher_type = { #ifdef CONFIG_PROC_FS .show = crypto_akcipher_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_akcipher_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AKCIPHER, .tfmsize = offsetof(struct crypto_akcipher, base), + .algsize = offsetof(struct akcipher_alg, base), }; int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, @@ -120,18 +130,22 @@ static int akcipher_default_op(struct akcipher_request *req) return -ENOSYS; } +static int akcipher_default_set_key(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + return -ENOSYS; +} + int crypto_register_akcipher(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; - if (!alg->sign) - alg->sign = akcipher_default_op; - if (!alg->verify) - alg->verify = akcipher_default_op; if (!alg->encrypt) alg->encrypt = akcipher_default_op; if (!alg->decrypt) alg->decrypt = akcipher_default_op; + if (!alg->set_priv_key) + alg->set_priv_key = akcipher_default_set_key; akcipher_prepare_alg(alg); return crypto_register_alg(base); @@ -154,5 +168,89 @@ int akcipher_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(akcipher_register_instance); +static int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) +{ + unsigned int reqsize = crypto_akcipher_reqsize(data->tfm); + struct akcipher_request *req; + struct scatterlist *sg; + unsigned int mlen; + unsigned int len; + u8 *buf; + + mlen = max(data->slen, data->dlen); + + len = sizeof(*req) + reqsize + mlen; + if (len < mlen) + return -EOVERFLOW; + + req = kzalloc(len, GFP_KERNEL); + if (!req) + return -ENOMEM; + + data->req = req; + akcipher_request_set_tfm(req, data->tfm); + + buf = (u8 *)(req + 1) + reqsize; + data->buf = buf; + memcpy(buf, data->src, data->slen); + + sg = &data->sg; + sg_init_one(sg, buf, mlen); + akcipher_request_set_crypt(req, sg, sg, data->slen, data->dlen); + + crypto_init_wait(&data->cwait); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &data->cwait); + + return 0; +} + +static int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, + int err) +{ + err = crypto_wait_req(err, &data->cwait); + memcpy(data->dst, data->buf, data->dlen); + data->dlen = data->req->dst_len; + kfree_sensitive(data->req); + return err; +} + +int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct crypto_akcipher_sync_data data = { + .tfm = tfm, + .src = src, + .dst = dst, + .slen = slen, + .dlen = dlen, + }; + + return crypto_akcipher_sync_prep(&data) ?: + crypto_akcipher_sync_post(&data, + crypto_akcipher_encrypt(data.req)); +} +EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt); + +int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct crypto_akcipher_sync_data data = { + .tfm = tfm, + .src = src, + .dst = dst, + .slen = slen, + .dlen = dlen, + }; + + 
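A hedged sketch of how the synchronous akcipher helpers above might be consumed; the DER key blob and the message/output buffers are illustrative assumptions:

	struct crypto_akcipher *tfm;
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* der_key, msg and out are placeholders for caller-owned buffers */
	err = crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
	if (!err)
		err = crypto_akcipher_sync_encrypt(tfm, msg, msg_len,
						   out, out_len);

	crypto_free_akcipher(tfm);
	return err;

Note that crypto_akcipher_sync_prep() above bounces src and dst through a single kmalloc'ed buffer, so these helpers suit small, occasional operations such as key wrapping rather than bulk data.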
return crypto_akcipher_sync_prep(&data) ?: + crypto_akcipher_sync_post(&data, + crypto_akcipher_decrypt(data.req)) ?: + data.dlen; +} +EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic public key cipher type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 43f999dba4dc..e604d0d8b7b4 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -16,6 +16,7 @@ #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/workqueue.h> #include "internal.h" @@ -68,13 +69,36 @@ static void crypto_free_instance(struct crypto_instance *inst) inst->alg.cra_type->free(inst); } +static void crypto_destroy_instance_workfn(struct work_struct *w) +{ + struct crypto_template *tmpl = container_of(w, struct crypto_template, + free_work); + struct crypto_instance *inst; + struct hlist_node *n; + HLIST_HEAD(list); + + down_write(&crypto_alg_sem); + hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) { + if (refcount_read(&inst->alg.cra_refcnt) != -1) + continue; + hlist_del(&inst->list); + hlist_add_head(&inst->list, &list); + } + up_write(&crypto_alg_sem); + + hlist_for_each_entry_safe(inst, n, &list, list) + crypto_free_instance(inst); +} + static void crypto_destroy_instance(struct crypto_alg *alg) { - struct crypto_instance *inst = (void *)alg; + struct crypto_instance *inst = container_of(alg, + struct crypto_instance, + alg); struct crypto_template *tmpl = inst->tmpl; - crypto_free_instance(inst); - crypto_tmpl_put(tmpl); + refcount_set(&alg->cra_refcnt, -1); + schedule_work(&tmpl->free_work); } /* @@ -120,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst, inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (!tmpl || !crypto_tmpl_get(tmpl)) + if (!tmpl) return; - list_move(&inst->alg.cra_list, list); + list_del_init(&inst->alg.cra_list); hlist_del(&inst->list); - inst->alg.cra_destroy = crypto_destroy_instance; + hlist_add_head(&inst->list, &tmpl->dead); BUG_ON(!list_empty(&inst->alg.cra_users)); + + crypto_alg_put(&inst->alg); } /* @@ -216,7 +242,63 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, } EXPORT_SYMBOL_GPL(crypto_remove_spawns); -static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) +static void crypto_alg_finish_registration(struct crypto_alg *alg, + struct list_head *algs_to_put) +{ + struct crypto_alg *q; + + list_for_each_entry(q, &crypto_alg_list, cra_list) { + if (q == alg) + continue; + + if (crypto_is_moribund(q)) + continue; + + if (crypto_is_larval(q)) + continue; + + if (strcmp(alg->cra_name, q->cra_name)) + continue; + + if (strcmp(alg->cra_driver_name, q->cra_driver_name) && + q->cra_priority > alg->cra_priority) + continue; + + crypto_remove_spawns(q, algs_to_put, alg); + } + + crypto_notify(CRYPTO_MSG_ALG_LOADED, alg); +} + +static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) +{ + struct crypto_larval *larval; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) || + (alg->cra_flags & CRYPTO_ALG_INTERNAL)) + return NULL; /* No self-test needed */ + + larval = crypto_larval_alloc(alg->cra_name, + alg->cra_flags | CRYPTO_ALG_TESTED, 0); + if (IS_ERR(larval)) + return larval; + + larval->adult = crypto_mod_get(alg); + if (!larval->adult) { + kfree(larval); + return ERR_PTR(-ENOENT); + } + + refcount_set(&larval->alg.cra_refcnt, 1); + memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, + CRYPTO_MAX_ALG_NAME); + larval->alg.cra_priority = alg->cra_priority; + + return larval; +} + +static struct 
crypto_larval * +__crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) { struct crypto_alg *q; struct crypto_larval *larval; @@ -227,9 +309,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) INIT_LIST_HEAD(&alg->cra_users); - /* No cheating! */ - alg->cra_flags &= ~CRYPTO_ALG_TESTED; - ret = -EEXIST; list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -246,35 +325,30 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) } if (!strcmp(q->cra_driver_name, alg->cra_name) || + !strcmp(q->cra_driver_name, alg->cra_driver_name) || !strcmp(q->cra_name, alg->cra_driver_name)) goto err; } - larval = crypto_larval_alloc(alg->cra_name, - alg->cra_flags | CRYPTO_ALG_TESTED, 0); + larval = crypto_alloc_test_larval(alg); if (IS_ERR(larval)) goto out; - ret = -ENOENT; - larval->adult = crypto_mod_get(alg); - if (!larval->adult) - goto free_larval; - - refcount_set(&larval->alg.cra_refcnt, 1); - memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, - CRYPTO_MAX_ALG_NAME); - larval->alg.cra_priority = alg->cra_priority; - list_add(&alg->cra_list, &crypto_alg_list); - list_add(&larval->alg.cra_list, &crypto_alg_list); - crypto_stats_init(alg); + if (larval) { + /* No cheating! */ + alg->cra_flags &= ~CRYPTO_ALG_TESTED; + + list_add(&larval->alg.cra_list, &crypto_alg_list); + } else { + alg->cra_flags |= CRYPTO_ALG_TESTED; + crypto_alg_finish_registration(alg, algs_to_put); + } out: return larval; -free_larval: - kfree(larval); err: larval = ERR_PTR(ret); goto out; @@ -286,7 +360,6 @@ void crypto_alg_tested(const char *name, int err) struct crypto_alg *alg; struct crypto_alg *q; LIST_HEAD(list); - bool best; down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -300,79 +373,34 @@ void crypto_alg_tested(const char *name, int err) } pr_err("alg: Unexpected test result for %s: %d\n", name, err); - goto unlock; + up_write(&crypto_alg_sem); + return; found: q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (err || list_empty(&alg->cra_list)) - goto complete; - - alg->cra_flags |= CRYPTO_ALG_TESTED; - - /* Only satisfy larval waiters if we are the best. */ - best = true; - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (crypto_is_moribund(q) || !crypto_is_larval(q)) - continue; - - if (strcmp(alg->cra_name, q->cra_name)) - continue; - - if (q->cra_priority > alg->cra_priority) { - best = false; - break; - } - } - - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (q == alg) - continue; - - if (crypto_is_moribund(q)) - continue; - if (crypto_is_larval(q)) { - struct crypto_larval *larval = (void *)q; - - /* - * Check to see if either our generic name or - * specific name can satisfy the name requested - * by the larval entry q. 
- */ - if (strcmp(alg->cra_name, q->cra_name) && - strcmp(alg->cra_driver_name, q->cra_name)) - continue; - - if (larval->adult) - continue; - if ((q->cra_flags ^ alg->cra_flags) & larval->mask) - continue; - - if (best && crypto_mod_get(alg)) - larval->adult = alg; - else - larval->adult = ERR_PTR(-EAGAIN); - - continue; - } + if (crypto_is_dead(alg)) + goto complete; - if (strcmp(alg->cra_name, q->cra_name)) - continue; + if (err == -ECANCELED) + alg->cra_flags |= CRYPTO_ALG_FIPS_INTERNAL; + else if (err) + goto complete; + else + alg->cra_flags &= ~CRYPTO_ALG_FIPS_INTERNAL; - if (strcmp(alg->cra_driver_name, q->cra_driver_name) && - q->cra_priority > alg->cra_priority) - continue; + alg->cra_flags |= CRYPTO_ALG_TESTED; - crypto_remove_spawns(q, &list, alg); - } + crypto_alg_finish_registration(alg, &list); complete: + list_del_init(&test->alg.cra_list); complete_all(&test->completion); -unlock: up_write(&crypto_alg_sem); + crypto_alg_put(&test->alg); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_alg_tested); @@ -389,29 +417,20 @@ void crypto_remove_final(struct list_head *list) } EXPORT_SYMBOL_GPL(crypto_remove_final); -static void crypto_wait_for_test(struct crypto_larval *larval) +static void crypto_free_alg(struct crypto_alg *alg) { - int err; + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; - err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); - if (err != NOTIFY_STOP) { - if (WARN_ON(err != NOTIFY_DONE)) - goto out; - crypto_alg_tested(larval->alg.cra_driver_name, 0); - } - - err = wait_for_completion_killable(&larval->completion); - WARN_ON(err); - if (!err) - crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); - -out: - crypto_larval_kill(&larval->alg); + crypto_destroy_alg(alg); + kfree(p); } int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; + bool test_started = false; + LIST_HEAD(algs_to_put); int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -419,14 +438,37 @@ int crypto_register_alg(struct crypto_alg *alg) if (err) return err; + if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST && + !WARN_ON_ONCE(alg->cra_destroy)) { + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; + + p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL); + if (!p) + return -ENOMEM; + + alg = (void *)(p + algsize); + alg->cra_destroy = crypto_free_alg; + } + down_write(&crypto_alg_sem); - larval = __crypto_register_alg(alg); + larval = __crypto_register_alg(alg, &algs_to_put); + if (!IS_ERR_OR_NULL(larval)) { + test_started = crypto_boot_test_finished(); + larval->test_started = test_started; + } up_write(&crypto_alg_sem); - if (IS_ERR(larval)) + if (IS_ERR(larval)) { + crypto_alg_put(alg); return PTR_ERR(larval); + } + + if (test_started) + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); - crypto_wait_for_test(larval); return 0; } EXPORT_SYMBOL_GPL(crypto_register_alg); @@ -456,10 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg) if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name)) return; - BUG_ON(refcount_read(&alg->cra_refcnt) != 1); - if (alg->cra_destroy) - alg->cra_destroy(alg); + WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1); + list_add(&alg->cra_list, &list); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_unregister_alg); @@ -498,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl) struct crypto_template *q; int err = -EEXIST; + INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn); + 
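For orientation, a minimal sketch of the template side that the new free_work reaping serves; the template name and the create() callback are hypothetical:

	static int my_create(struct crypto_template *tmpl, struct rtattr **tb);

	static struct crypto_template my_template = {
		.name	= "mywrap",
		.create	= my_create,
		.module	= THIS_MODULE,
	};

	static int __init my_template_init(void)
	{
		/* crypto_register_template() now also wires up free_work */
		return crypto_register_template(&my_template);
	}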
down_write(&crypto_alg_sem); crypto_check_module_sig(tmpl->module); @@ -559,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl) crypto_free_instance(inst); } crypto_remove_final(&users); + + flush_work(&tmpl->free_work); } EXPORT_SYMBOL_GPL(crypto_unregister_template); @@ -602,6 +647,8 @@ int crypto_register_instance(struct crypto_template *tmpl, { struct crypto_larval *larval; struct crypto_spawn *spawn; + u32 fips_internal = 0; + LIST_HEAD(algs_to_put); int err; err = crypto_check_alg(&inst->alg); @@ -610,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl, inst->alg.cra_module = tmpl->module; inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE; + inst->alg.cra_destroy = crypto_destroy_instance; down_write(&crypto_alg_sem); @@ -624,14 +672,20 @@ int crypto_register_instance(struct crypto_template *tmpl, spawn->inst = inst; spawn->registered = true; + fips_internal |= spawn->alg->cra_flags; + crypto_mod_put(spawn->alg); spawn = next; } - larval = __crypto_register_alg(&inst->alg); + inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL); + + larval = __crypto_register_alg(&inst->alg, &algs_to_put); if (IS_ERR(larval)) goto unlock; + else if (larval) + larval->test_started = true; hlist_add_head(&inst->list, &tmpl->instances); inst->tmpl = tmpl; @@ -639,15 +693,15 @@ int crypto_register_instance(struct crypto_template *tmpl, unlock: up_write(&crypto_alg_sem); - err = PTR_ERR(larval); if (IS_ERR(larval)) - goto err; + return PTR_ERR(larval); - crypto_wait_for_test(larval); - err = 0; + if (larval) + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); -err: - return err; + return 0; } EXPORT_SYMBOL_GPL(crypto_register_instance); @@ -679,7 +733,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, if (IS_ERR(name)) return PTR_ERR(name); - alg = crypto_find_alg(name, spawn->frontend, type, mask); + alg = crypto_find_alg(name, spawn->frontend, + type | CRYPTO_ALG_FIPS_INTERNAL, mask); if (IS_ERR(alg)) return PTR_ERR(alg); @@ -868,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg) +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, alg->cra_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return 0; } -EXPORT_SYMBOL_GPL(crypto_inst_setname); +EXPORT_SYMBOL_GPL(__crypto_inst_setname); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) { @@ -918,6 +973,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request); void crypto_enqueue_request_head(struct crypto_queue *queue, struct crypto_async_request *request) { + if (unlikely(queue->qlen >= queue->max_qlen)) + queue->backlog = queue->backlog->prev; + queue->qlen++; list_add(&request->list, &queue->list); } @@ -936,7 +994,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) queue->backlog = queue->backlog->next; request = queue->list.next; - list_del(request); + list_del_init(request); return list_entry(request, struct crypto_async_request, list); } @@ -973,59 +1031,6 @@ void crypto_inc(u8 *a, unsigned int size) } 
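The backlog fix in crypto_enqueue_request_head() above matters to drivers that pump a crypto_queue; a minimal consumer sketch under assumed names (my_queue, my_lock, my_pump):

	static struct crypto_queue my_queue;
	static DEFINE_SPINLOCK(my_lock);

	static void my_setup(void)
	{
		crypto_init_queue(&my_queue, 32);	/* max_qlen = 32 */
	}

	static int my_submit(struct crypto_async_request *req)
	{
		int ret;

		spin_lock_bh(&my_lock);
		/* -EBUSY with MAY_BACKLOG set means "queued as backlog" */
		ret = crypto_enqueue_request(&my_queue, req);
		spin_unlock_bh(&my_lock);
		return ret;
	}

	static void my_pump(void)
	{
		struct crypto_async_request *req, *backlog;

		spin_lock_bh(&my_lock);
		backlog = crypto_get_backlog(&my_queue);
		req = crypto_dequeue_request(&my_queue);
		spin_unlock_bh(&my_lock);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);
		if (req) {
			/* ... process req, then complete it ... */
		}
	}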
EXPORT_SYMBOL_GPL(crypto_inc); -void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) -{ - int relalign = 0; - - if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - int size = sizeof(unsigned long); - int d = (((unsigned long)dst ^ (unsigned long)src1) | - ((unsigned long)dst ^ (unsigned long)src2)) & - (size - 1); - - relalign = d ? 1 << __ffs(d) : size; - - /* - * If we care about alignment, process as many bytes as - * needed to advance dst and src to values whose alignments - * equal their relative alignment. This will allow us to - * process the remainder of the input using optimal strides. - */ - while (((unsigned long)dst & (relalign - 1)) && len > 0) { - *dst++ = *src1++ ^ *src2++; - len--; - } - } - - while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { - *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; - dst += 8; - src1 += 8; - src2 += 8; - len -= 8; - } - - while (len >= 4 && !(relalign & 3)) { - *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; - dst += 4; - src1 += 4; - src2 += 4; - len -= 4; - } - - while (len >= 2 && !(relalign & 1)) { - *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; - dst += 2; - src1 += 2; - src2 += 2; - len -= 2; - } - - while (len--) - *dst++ = *src1++ ^ *src2++; -} -EXPORT_SYMBOL_GPL(__crypto_xor); - unsigned int crypto_alg_extsize(struct crypto_alg *alg) { return alg->cra_ctxsize + @@ -1048,222 +1053,54 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, } EXPORT_SYMBOL_GPL(crypto_type_has_alg); -#ifdef CONFIG_CRYPTO_STATS -void crypto_stats_init(struct crypto_alg *alg) +static void __init crypto_start_tests(void) { - memset(&alg->stats, 0, sizeof(alg->stats)); -} -EXPORT_SYMBOL_GPL(crypto_stats_init); - -void crypto_stats_get(struct crypto_alg *alg) -{ - crypto_alg_get(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_get); - -void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, - int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.aead.err_cnt); - } else { - atomic64_inc(&alg->stats.aead.encrypt_cnt); - atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt); - -void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, - int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.aead.err_cnt); - } else { - atomic64_inc(&alg->stats.aead.decrypt_cnt); - atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt); - -void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.akcipher.err_cnt); - } else { - atomic64_inc(&alg->stats.akcipher.encrypt_cnt); - atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt); + if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI)) + return; -void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.akcipher.err_cnt); - } else { - atomic64_inc(&alg->stats.akcipher.decrypt_cnt); - atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt); + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) + return; -void crypto_stats_akcipher_sign(int ret, struct 
crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.akcipher.err_cnt); - else - atomic64_inc(&alg->stats.akcipher.sign_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign); + set_crypto_boot_test_finished(); -void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.akcipher.err_cnt); - else - atomic64_inc(&alg->stats.akcipher.verify_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify); + for (;;) { + struct crypto_larval *larval = NULL; + struct crypto_alg *q; -void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.compress.err_cnt); - } else { - atomic64_inc(&alg->stats.compress.compress_cnt); - atomic64_add(slen, &alg->stats.compress.compress_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_compress); + down_write(&crypto_alg_sem); -void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.compress.err_cnt); - } else { - atomic64_inc(&alg->stats.compress.decompress_cnt); - atomic64_add(slen, &alg->stats.compress.decompress_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_decompress); - -void crypto_stats_ahash_update(unsigned int nbytes, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.hash.err_cnt); - else - atomic64_add(nbytes, &alg->stats.hash.hash_tlen); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_ahash_update); + list_for_each_entry(q, &crypto_alg_list, cra_list) { + struct crypto_larval *l; -void crypto_stats_ahash_final(unsigned int nbytes, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.hash.err_cnt); - } else { - atomic64_inc(&alg->stats.hash.hash_cnt); - atomic64_add(nbytes, &alg->stats.hash.hash_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_ahash_final); + if (!crypto_is_larval(q)) + continue; -void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) -{ - if (ret) - atomic64_inc(&alg->stats.kpp.err_cnt); - else - atomic64_inc(&alg->stats.kpp.setsecret_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret); + l = (void *)q; -void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) -{ - if (ret) - atomic64_inc(&alg->stats.kpp.err_cnt); - else - atomic64_inc(&alg->stats.kpp.generate_public_key_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key); + if (!crypto_is_test_larval(l)) + continue; -void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) -{ - if (ret) - atomic64_inc(&alg->stats.kpp.err_cnt); - else - atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret); + if (l->test_started) + continue; -void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.rng.err_cnt); - else - atomic64_inc(&alg->stats.rng.seed_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_rng_seed); + l->test_started = true; + larval = l; + break; + } -void crypto_stats_rng_generate(struct crypto_alg *alg, 
unsigned int dlen, - int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.rng.err_cnt); - } else { - atomic64_inc(&alg->stats.rng.generate_cnt); - atomic64_add(dlen, &alg->stats.rng.generate_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_rng_generate); + up_write(&crypto_alg_sem); -void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.err_cnt); - } else { - atomic64_inc(&alg->stats.cipher.encrypt_cnt); - atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt); + if (!larval) + break; -void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.err_cnt); - } else { - atomic64_inc(&alg->stats.cipher.decrypt_cnt); - atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen); + crypto_schedule_test(larval); } - crypto_alg_put(alg); } -EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt); -#endif static int __init crypto_algapi_init(void) { crypto_init_proc(); + crypto_start_tests(); return 0; } @@ -1272,8 +1109,13 @@ static void __exit crypto_algapi_exit(void) crypto_exit_proc(); } -module_init(crypto_algapi_init); +/* + * We run this at late_initcall so that all the built-in algorithms + * have had a chance to register themselves first. + */ +late_initcall(crypto_algapi_init); module_exit(crypto_algapi_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cryptographic algorithms API"); +MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/algboss.c b/crypto/algboss.c index 1814d2c5188a..846f586889ee 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data) { struct cryptomgr_param *param = data; struct crypto_template *tmpl; - int err; + int err = -ENOENT; tmpl = crypto_lookup_template(param->template); if (!tmpl) @@ -64,10 +64,12 @@ static int cryptomgr_probe(void *data) crypto_tmpl_put(tmpl); out: + param->larval->adult = ERR_PTR(err); + param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD; complete_all(¶m->larval->completion); crypto_alg_put(¶m->larval->alg); kfree(param); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } static int cryptomgr_schedule_probe(struct crypto_larval *larval) @@ -138,9 +140,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) goto err_free_param; } - if (!i) - goto err_free_param; - param->tb[i + 1] = NULL; param->type.attr.rta_len = sizeof(param->type); @@ -175,29 +174,23 @@ static int cryptomgr_test(void *data) { struct crypto_test_param *param = data; u32 type = param->type; - int err = 0; - -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS - goto skiptest; -#endif - - if (type & CRYPTO_ALG_TESTED) - goto skiptest; + int err; err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED); -skiptest: crypto_alg_tested(param->driver, err); kfree(param); - module_put_and_exit(0); + module_put_and_kthread_exit(0); } static int cryptomgr_schedule_test(struct crypto_alg *alg) { struct task_struct *thread; struct crypto_test_param *param; - u32 type; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) + return NOTIFY_DONE; if (!try_module_get(THIS_MODULE)) goto err; @@ -208,13 +201,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver)); 
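A compact sketch of the one-shot kthread pattern cryptomgr uses above, where module_put_and_kthread_exit() replaces the old module_put_and_exit(); the worker body is a placeholder:

	static int my_oneshot(void *data)
	{
		/* ... do the probe/test work ... */
		module_put_and_kthread_exit(0);	/* never returns */
	}

	static int my_spawn(void *data)
	{
		struct task_struct *thread;

		if (!try_module_get(THIS_MODULE))
			return -ENOENT;

		thread = kthread_run(my_oneshot, data, "my_oneshot");
		if (IS_ERR(thread)) {
			module_put(THIS_MODULE);
			return PTR_ERR(thread);
		}
		return 0;
	}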
memcpy(param->alg, alg->cra_name, sizeof(param->alg)); - type = alg->cra_flags; - - /* Do not test internal algorithms. */ - if (type & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_TESTED; - - param->type = type; + param->type = alg->cra_flags; thread = kthread_run(cryptomgr_test, param, "cryptomgr_test"); if (IS_ERR(thread)) @@ -260,13 +247,7 @@ static void __exit cryptomgr_exit(void) BUG_ON(err); } -/* - * This is arch_initcall() so that the crypto self-tests are run on algorithms - * registered early by subsys_initcall(). subsys_initcall() is needed for - * generic implementations so that they're available for comparison tests when - * other implementations are registered later by module_init(). - */ -arch_initcall(cryptomgr_init); +module_init(cryptomgr_init); module_exit(cryptomgr_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 42493b4d8ce4..79b016a899a1 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -9,10 +9,10 @@ * The following concept of the memory management is used: * * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is - * filled by user space with the data submitted via sendpage/sendmsg. Filling - * up the TX SGL does not cause a crypto operation -- the data will only be - * tracked by the kernel. Upon receipt of one recvmsg call, the caller must - * provide a buffer which is tracked with the RX SGL. + * filled by user space with the data submitted via sendmsg (maybe with + * MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation + * -- the data will only be tracked by the kernel. Upon receipt of one recvmsg + * call, the caller must provide a buffer which is tracked with the RX SGL. * * During the processing of the recvmsg operation, the cipher request is * allocated and prepared. 
As part of the recvmsg operation, the processed @@ -27,7 +27,6 @@ #include <crypto/scatterwalk.h> #include <crypto/if_alg.h> #include <crypto/skcipher.h> -#include <crypto/null.h> #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> @@ -36,19 +35,13 @@ #include <linux/net.h> #include <net/sock.h> -struct aead_tfm { - struct crypto_aead *aead; - struct crypto_sync_skcipher *null_tfm; -}; - static inline bool aead_sufficient_data(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int as = crypto_aead_authsize(tfm); /* @@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int ivsize = crypto_aead_ivsize(tfm); return af_alg_sendmsg(sock, msg, size, ivsize); } -static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm, - struct scatterlist *src, - struct scatterlist *dst, unsigned int len) -{ - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); - - skcipher_request_set_sync_tfm(skreq, null_tfm); - skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, - NULL, NULL); - skcipher_request_set_crypt(skreq, src, dst, len, NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; - struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm; + struct crypto_aead *tfm = pask->private; unsigned int i, as = crypto_aead_authsize(tfm); struct af_alg_async_req *areq; struct af_alg_tsgl *tsgl, *tmp; @@ -113,19 +89,19 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, } /* - * Data length provided by caller via sendmsg/sendpage that has not - * yet been processed. + * Data length provided by caller via sendmsg that has not yet been + * processed. */ used = ctx->used; /* - * Make sure sufficient data is present -- note, the same check is - * also present in sendmsg/sendpage. The checks in sendpage/sendmsg - * shall provide an information to the data sender that something is - * wrong, but they are irrelevant to maintain the kernel integrity. - * We need this check here too in case user space decides to not honor - * the error message in sendmsg/sendpage and still call recvmsg. This - * check here protects the kernel integrity. + * Make sure sufficient data is present -- note, the same check is also + * present in sendmsg. The checks in sendmsg shall provide + * information to the data sender that something is wrong, but they are + * irrelevant to maintaining kernel integrity. We need this check + * here too in case user space decides to not honor the error message + * in sendmsg and still call recvmsg. This check here protects the + * kernel integrity. 
*/ if (!aead_sufficient_data(sk)) return -EINVAL; @@ -210,7 +186,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Use the RX SGL as source (and destination) for crypto op. */ - rsgl_src = areq->first_rsgl.sgl.sg; + rsgl_src = areq->first_rsgl.sgl.sgt.sgl; if (ctx->enc) { /* @@ -223,10 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * v v * RX SGL: AAD || PT || Tag */ - err = crypto_aead_copy_sgl(null_tfm, tsgl_src, - areq->first_rsgl.sgl.sg, processed); - if (err) - goto free; + memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, + processed); af_alg_pull_tsgl(sk, processed, NULL, 0); } else { /* @@ -240,11 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * RX SGL: AAD || CT ----+ */ - /* Copy AAD || CT to RX SGL buffer for in-place operation. */ - err = crypto_aead_copy_sgl(null_tfm, tsgl_src, - areq->first_rsgl.sgl.sg, outlen); - if (err) - goto free; + /* Copy AAD || CT to RX SGL buffer for in-place operation. */ + memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen); /* Create TX SGL for tag and chain it to RX SGL. */ areq->tsgl_entries = af_alg_count_tsgl(sk, processed, @@ -267,10 +238,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, if (usedpages) { /* RX SGL present */ struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl; + struct scatterlist *sg = sgl_prev->sgt.sgl; - sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); - sg_chain(sgl_prev->sg, sgl_prev->npages + 1, - areq->tsgl); + sg_unmark_end(sg + sgl_prev->sgt.nents - 1); + sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl); } else /* no RX SGL present (e.g. authentication only) */ rsgl_src = areq->tsgl; @@ -278,7 +249,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* Initialize the crypto operation */ aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src, - areq->first_rsgl.sgl.sg, used, ctx->iv); + areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv); aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); aead_request_set_tfm(&areq->cra_u.aead_req, tfm); @@ -368,7 +339,6 @@ static struct proto_ops algif_aead_ops = { .release = af_alg_release, .sendmsg = aead_sendmsg, - .sendpage = af_alg_sendpage, .recvmsg = aead_recvmsg, .poll = af_alg_poll, }; @@ -378,7 +348,7 @@ static int aead_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct aead_tfm *tfm; + struct crypto_aead *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -392,7 +362,7 @@ static int aead_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) + if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; atomic_dec(&pask->nokey_refcnt); @@ -420,18 +390,6 @@ static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg, return aead_sendmsg(sock, msg, size); } -static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - int err; - - err = aead_check_key(sock); - if (err) - return err; - - return af_alg_sendpage(sock, page, offset, size, flags); -} - static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -459,61 +417,28 @@ static struct proto_ops algif_aead_ops_nokey = { .release = af_alg_release, .sendmsg = aead_sendmsg_nokey, - .sendpage = aead_sendpage_nokey, .recvmsg = aead_recvmsg_nokey, .poll = af_alg_poll, }; static void *aead_bind(const char 
*name, u32 type, u32 mask) { - struct aead_tfm *tfm; - struct crypto_aead *aead; - struct crypto_sync_skcipher *null_tfm; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - aead = crypto_alloc_aead(name, type, mask); - if (IS_ERR(aead)) { - kfree(tfm); - return ERR_CAST(aead); - } - - null_tfm = crypto_get_default_null_skcipher(); - if (IS_ERR(null_tfm)) { - crypto_free_aead(aead); - kfree(tfm); - return ERR_CAST(null_tfm); - } - - tfm->aead = aead; - tfm->null_tfm = null_tfm; - - return tfm; + return crypto_alloc_aead(name, type, mask); } static void aead_release(void *private) { - struct aead_tfm *tfm = private; - - crypto_free_aead(tfm->aead); - crypto_put_default_null_skcipher(); - kfree(tfm); + crypto_free_aead(private); } static int aead_setauthsize(void *private, unsigned int authsize) { - struct aead_tfm *tfm = private; - - return crypto_aead_setauthsize(tfm->aead, authsize); + return crypto_aead_setauthsize(private, authsize); } static int aead_setkey(void *private, const u8 *key, unsigned int keylen) { - struct aead_tfm *tfm = private; - - return crypto_aead_setkey(tfm->aead, key, keylen); + return crypto_aead_setkey(private, key, keylen); } static void aead_sock_destruct(struct sock *sk) @@ -522,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk) struct af_alg_ctx *ctx = ask->private; struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int ivlen = crypto_aead_ivsize(tfm); af_alg_pull_tsgl(sk, ctx->used, NULL, 0); @@ -536,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) { struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - struct aead_tfm *tfm = private; - struct crypto_aead *aead = tfm->aead; + struct crypto_aead *tfm = private; unsigned int len = sizeof(*ctx); - unsigned int ivlen = crypto_aead_ivsize(aead); + unsigned int ivlen = crypto_aead_ivsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -566,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) static int aead_accept_parent(void *private, struct sock *sk) { - struct aead_tfm *tfm = private; + struct crypto_aead *tfm = private; - if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) + if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return aead_accept_parent_nokey(private, sk); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 50f7b22f1b48..4d3dfc60a16a 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -63,121 +63,117 @@ static void hash_free_result(struct sock *sk, struct hash_ctx *ctx) static int hash_sendmsg(struct socket *sock, struct msghdr *msg, size_t ignored) { - int limit = ALG_MAX_PAGES * PAGE_SIZE; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; - long copied = 0; + ssize_t copied = 0; + size_t len, max_pages, npages; + bool continuing, need_init = false; int err; - if (limit > sk->sk_sndbuf) - limit = sk->sk_sndbuf; + max_pages = min_t(size_t, ALG_MAX_PAGES, + DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE)); lock_sock(sk); - if (!ctx->more) { - if ((msg->msg_flags & MSG_MORE)) - hash_free_result(sk, ctx); + continuing = ctx->more; - err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait); - if (err) - goto unlock; - } - - ctx->more = false; - - while (msg_data_left(msg)) { - int len = msg_data_left(msg); - - if (len > limit) - len = 
limit; - - len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len); - if (len < 0) { - err = copied ? 0 : len; - goto unlock; + if (!continuing) { + /* Discard a previous request that wasn't marked MSG_MORE. */ + hash_free_result(sk, ctx); + if (!msg_data_left(msg)) + goto done; /* Zero-length; don't start new req */ + need_init = true; + } else if (!msg_data_left(msg)) { + /* + * No data - finalise the prev req if MSG_MORE so any error + * comes out here. + */ + if (!(msg->msg_flags & MSG_MORE)) { + err = hash_alloc_result(sk, ctx); + if (err) + goto unlock_free_result; + ahash_request_set_crypt(&ctx->req, NULL, + ctx->result, 0); + err = crypto_wait_req(crypto_ahash_final(&ctx->req), + &ctx->wait); + if (err) + goto unlock_free_result; } - - ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); - - err = crypto_wait_req(crypto_ahash_update(&ctx->req), - &ctx->wait); - af_alg_free_sg(&ctx->sgl); - if (err) - goto unlock; - - copied += len; - iov_iter_advance(&msg->msg_iter, len); + goto done_more; } - err = 0; - - ctx->more = msg->msg_flags & MSG_MORE; - if (!ctx->more) { - err = hash_alloc_result(sk, ctx); - if (err) - goto unlock; - - ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - err = crypto_wait_req(crypto_ahash_final(&ctx->req), - &ctx->wait); - } + while (msg_data_left(msg)) { + ctx->sgl.sgt.sgl = ctx->sgl.sgl; + ctx->sgl.sgt.nents = 0; + ctx->sgl.sgt.orig_nents = 0; -unlock: - release_sock(sk); + err = -EIO; + npages = iov_iter_npages(&msg->msg_iter, max_pages); + if (npages == 0) + goto unlock_free; - return err ?: copied; -} + sg_init_table(ctx->sgl.sgl, npages); -static ssize_t hash_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct hash_ctx *ctx = ask->private; - int err; + ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter); - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; + err = extract_iter_to_sg(&msg->msg_iter, LONG_MAX, + &ctx->sgl.sgt, npages, 0); + if (err < 0) + goto unlock_free; + len = err; + sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1); - lock_sock(sk); - sg_init_table(ctx->sgl.sg, 1); - sg_set_page(ctx->sgl.sg, page, size, offset); - - if (!(flags & MSG_MORE)) { - err = hash_alloc_result(sk, ctx); - if (err) - goto unlock; - } else if (!ctx->more) - hash_free_result(sk, ctx); + if (!msg_data_left(msg)) { + err = hash_alloc_result(sk, ctx); + if (err) + goto unlock_free; + } - ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size); + ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, + ctx->result, len); - if (!(flags & MSG_MORE)) { - if (ctx->more) - err = crypto_ahash_finup(&ctx->req); - else + if (!msg_data_left(msg) && !continuing && + !(msg->msg_flags & MSG_MORE)) { err = crypto_ahash_digest(&ctx->req); - } else { - if (!ctx->more) { - err = crypto_ahash_init(&ctx->req); - err = crypto_wait_req(err, &ctx->wait); - if (err) - goto unlock; + } else { + if (need_init) { + err = crypto_wait_req( + crypto_ahash_init(&ctx->req), + &ctx->wait); + if (err) + goto unlock_free; + need_init = false; + } + + if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE)) + err = crypto_ahash_update(&ctx->req); + else + err = crypto_ahash_finup(&ctx->req); + continuing = true; } - err = crypto_ahash_update(&ctx->req); - } - - err = crypto_wait_req(err, &ctx->wait); - if (err) - goto unlock; + err = crypto_wait_req(err, &ctx->wait); + if (err) + goto unlock_free; - ctx->more = flags & MSG_MORE; + copied += len; 
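From user space, the reworked hash_sendmsg() above is still driven through the usual AF_ALG socket sequence; a hedged example computing SHA-256 (error handling omitted for brevity):

	#include <sys/socket.h>
	#include <linux/if_alg.h>
	#include <unistd.h>

	int sha256_afalg(const void *data, size_t len, unsigned char out[32])
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		int opfd;

		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		send(opfd, data, len, 0);	/* no MSG_MORE: one-shot digest */
		read(opfd, out, 32);

		close(opfd);
		close(tfmfd);
		return 0;
	}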
+ af_alg_free_sg(&ctx->sgl); + } +done_more: + ctx->more = msg->msg_flags & MSG_MORE; +done: + err = 0; unlock: release_sock(sk); + return copied ?: err; - return err ?: size; +unlock_free: + af_alg_free_sg(&ctx->sgl); +unlock_free_result: + hash_free_result(sk, ctx); + ctx->more = false; + goto unlock; } static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, @@ -227,31 +223,38 @@ unlock: return err ?: len; } -static int hash_accept(struct socket *sock, struct socket *newsock, int flags, - bool kern) +static int hash_accept(struct socket *sock, struct socket *newsock, + struct proto_accept_arg *arg) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; struct ahash_request *req = &ctx->req; - char state[HASH_MAX_STATESIZE]; + struct crypto_ahash *tfm; struct sock *sk2; struct alg_sock *ask2; struct hash_ctx *ctx2; + char *state; bool more; int err; + tfm = crypto_ahash_reqtfm(req); + state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL); + err = -ENOMEM; + if (!state) + goto out; + lock_sock(sk); more = ctx->more; err = more ? crypto_ahash_export(req, state) : 0; release_sock(sk); if (err) - return err; + goto out_free_state; - err = af_alg_accept(ask->parent, newsock, kern); + err = af_alg_accept(ask->parent, newsock, arg); if (err) - return err; + goto out_free_state; sk2 = newsock->sk; ask2 = alg_sk(sk2); @@ -259,14 +262,14 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags, ctx2->more = more; if (!more) - return err; + goto out_free_state; err = crypto_ahash_import(&ctx2->req, state); - if (err) { - sock_orphan(sk2); - sock_put(sk2); - } +out_free_state: + kfree_sensitive(state); + +out: return err; } @@ -284,7 +287,6 @@ static struct proto_ops algif_hash_ops = { .release = af_alg_release, .sendmsg = hash_sendmsg, - .sendpage = hash_sendpage, .recvmsg = hash_recvmsg, .accept = hash_accept, }; @@ -336,18 +338,6 @@ static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg, return hash_sendmsg(sock, msg, size); } -static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - int err; - - err = hash_check_key(sock); - if (err) - return err; - - return hash_sendpage(sock, page, offset, size, flags); -} - static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -361,7 +351,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg, } static int hash_accept_nokey(struct socket *sock, struct socket *newsock, - int flags, bool kern) + struct proto_accept_arg *arg) { int err; @@ -369,7 +359,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock, if (err) return err; - return hash_accept(sock, newsock, flags, kern); + return hash_accept(sock, newsock, arg); } static struct proto_ops algif_hash_ops_nokey = { @@ -386,7 +376,6 @@ static struct proto_ops algif_hash_ops_nokey = { .release = af_alg_release, .sendmsg = hash_sendmsg_nokey, - .sendpage = hash_sendpage_nokey, .recvmsg = hash_recvmsg_nokey, .accept = hash_accept_nokey, }; @@ -427,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) if (!ctx) return -ENOMEM; - ctx->result = NULL; + memset(ctx, 0, len); ctx->len = len; - ctx->more = false; crypto_init_wait(&ctx->wait); ask->private = ctx; @@ -478,4 +466,5 @@ static void __exit algif_hash_exit(void) module_init(algif_hash_init); module_exit(algif_hash_exit); +MODULE_DESCRIPTION("Userspace interface for hash 
algorithms"); MODULE_LICENSE("GPL"); diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 407408c43730..1a86e40c8372 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -174,7 +174,6 @@ static struct proto_ops algif_rng_ops = { .bind = sock_no_bind, .accept = sock_no_accept, .sendmsg = sock_no_sendmsg, - .sendpage = sock_no_sendpage, .release = af_alg_release, .recvmsg = rng_recvmsg, @@ -192,7 +191,6 @@ static struct proto_ops __maybe_unused algif_rng_test_ops = { .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .sendpage = sock_no_sendpage, .release = af_alg_release, .recvmsg = rng_test_recvmsg, @@ -250,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk) if (!ctx) return -ENOMEM; + memset(ctx, 0, len); ctx->len = len; - ctx->addtl = NULL; - ctx->addtl_len = 0; /* * No seeding done at that point -- if multiple accepts are diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ee8890ee8f33..125d395c5e00 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -9,10 +9,10 @@ * The following concept of the memory management is used: * * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is - * filled by user space with the data submitted via sendpage/sendmsg. Filling - * up the TX SGL does not cause a crypto operation -- the data will only be - * tracked by the kernel. Upon receipt of one recvmsg call, the caller must - * provide a buffer which is tracked with the RX SGL. + * filled by user space with the data submitted via sendmsg. Filling up the TX + * SGL does not cause a crypto operation -- the data will only be tracked by + * the kernel. Upon receipt of one recvmsg call, the caller must provide a + * buffer which is tracked with the RX SGL. * * During the processing of the recvmsg operation, the cipher request is * allocated and prepared. 
As part of the recvmsg operation, the processed @@ -47,6 +47,52 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, return af_alg_sendmsg(sock, msg, size, ivsize); } +static int algif_skcipher_export(struct sock *sk, struct skcipher_request *req) +{ + struct alg_sock *ask = alg_sk(sk); + struct crypto_skcipher *tfm; + struct af_alg_ctx *ctx; + struct alg_sock *pask; + unsigned statesize; + struct sock *psk; + int err; + + if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL)) + return 0; + + ctx = ask->private; + psk = ask->parent; + pask = alg_sk(psk); + tfm = pask->private; + + statesize = crypto_skcipher_statesize(tfm); + ctx->state = sock_kmalloc(sk, statesize, GFP_ATOMIC); + if (!ctx->state) + return -ENOMEM; + + err = crypto_skcipher_export(req, ctx->state); + if (err) { + sock_kzfree_s(sk, ctx->state, statesize); + ctx->state = NULL; + } + + return err; +} + +static void algif_skcipher_done(void *data, int err) +{ + struct af_alg_async_req *areq = data; + struct sock *sk = areq->sk; + + if (err) + goto out; + + err = algif_skcipher_export(sk, &areq->cra_u.skcipher_req); + +out: + af_alg_async_cb(data, err); +} + static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -58,6 +104,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, struct crypto_skcipher *tfm = pask->private; unsigned int bs = crypto_skcipher_chunksize(tfm); struct af_alg_async_req *areq; + unsigned cflags = 0; int err = 0; size_t len = 0; @@ -82,8 +129,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, * If more buffers are to be expected to be processed, process only * full block size buffers. */ - if (ctx->more || len < ctx->used) + if (ctx->more || len < ctx->used) { len -= len % bs; + cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL; + } /* * Create a per request TX SGL for this request which tracks the @@ -105,7 +154,17 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, /* Initialize the crypto operation */ skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm); skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl, - areq->first_rsgl.sgl.sg, len, ctx->iv); + areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv); + + if (ctx->state) { + err = crypto_skcipher_import(&areq->cra_u.skcipher_req, + ctx->state); + sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm)); + ctx->state = NULL; + if (err) + goto free; + cflags |= CRYPTO_SKCIPHER_REQ_CONT; + } if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ @@ -116,8 +175,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, areq->outlen = len; skcipher_request_set_callback(&areq->cra_u.skcipher_req, + cflags | CRYPTO_TFM_REQ_MAY_SLEEP, - af_alg_async_cb, areq); + algif_skcipher_done, areq); err = ctx->enc ? 
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); @@ -130,6 +190,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, } else { /* Synchronous operation */ skcipher_request_set_callback(&areq->cra_u.skcipher_req, + cflags | CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &ctx->wait); @@ -137,8 +198,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), &ctx->wait); - } + if (!err) + err = algif_skcipher_export( + sk, &areq->cra_u.skcipher_req); + } free: af_alg_free_resources(areq); @@ -194,7 +258,6 @@ static struct proto_ops algif_skcipher_ops = { .release = af_alg_release, .sendmsg = skcipher_sendmsg, - .sendpage = af_alg_sendpage, .recvmsg = skcipher_recvmsg, .poll = af_alg_poll, }; @@ -246,18 +309,6 @@ static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg, return skcipher_sendmsg(sock, msg, size); } -static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - int err; - - err = skcipher_check_key(sock); - if (err) - return err; - - return af_alg_sendpage(sock, page, offset, size, flags); -} - static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -285,7 +336,6 @@ static struct proto_ops algif_skcipher_ops_nokey = { .release = af_alg_release, .sendmsg = skcipher_sendmsg_nokey, - .sendpage = skcipher_sendpage_nokey, .recvmsg = skcipher_recvmsg_nokey, .poll = af_alg_poll, }; @@ -315,6 +365,8 @@ static void skcipher_sock_destruct(struct sock *sk) af_alg_pull_tsgl(sk, ctx->used, NULL, 0); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); + if (ctx->state) + sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm)); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } @@ -385,4 +437,5 @@ static void __exit algif_skcipher_exit(void) module_init(algif_skcipher_init); module_exit(algif_skcipher_exit); +MODULE_DESCRIPTION("Userspace interface for skcipher algorithms"); MODULE_LICENSE("GPL"); diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c deleted file mode 100644 index 3f512efaba3a..000000000000 --- a/crypto/ansi_cprng.c +++ /dev/null @@ -1,474 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * PRNG: Pseudo Random Number Generator - * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using - * AES 128 cipher - * - * (C) Neil Horman <nhorman@tuxdriver.com> - */ - -#include <crypto/internal/cipher.h> -#include <crypto/internal/rng.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/string.h> - -#define DEFAULT_PRNG_KEY "0123456789abcdef" -#define DEFAULT_PRNG_KSZ 16 -#define DEFAULT_BLK_SZ 16 -#define DEFAULT_V_SEED "zaybxcwdveuftgsh" - -/* - * Flags for the prng_context flags field - */ - -#define PRNG_FIXED_SIZE 0x1 -#define PRNG_NEED_RESET 0x2 - -/* - * Note: DT is our counter value - * I is our intermediate value - * V is our seed vector - * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf - * for implementation details - */ - - -struct prng_context { - spinlock_t prng_lock; - unsigned char rand_data[DEFAULT_BLK_SZ]; - unsigned char last_rand_data[DEFAULT_BLK_SZ]; - unsigned char DT[DEFAULT_BLK_SZ]; - unsigned char I[DEFAULT_BLK_SZ]; - unsigned char V[DEFAULT_BLK_SZ]; - u32 rand_data_valid; - 
struct crypto_cipher *tfm; - u32 flags; -}; - -static int dbg; - -static void hexdump(char *note, unsigned char *buf, unsigned int len) -{ - if (dbg) { - printk(KERN_CRIT "%s", note); - print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, - 16, 1, - buf, len, false); - } -} - -#define dbgprint(format, args...) do {\ -if (dbg)\ - printk(format, ##args);\ -} while (0) - -static void xor_vectors(unsigned char *in1, unsigned char *in2, - unsigned char *out, unsigned int size) -{ - int i; - - for (i = 0; i < size; i++) - out[i] = in1[i] ^ in2[i]; - -} -/* - * Returns DEFAULT_BLK_SZ bytes of random data per call - * returns 0 if generation succeeded, <0 if something went wrong - */ -static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) -{ - int i; - unsigned char tmp[DEFAULT_BLK_SZ]; - unsigned char *output = NULL; - - - dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n", - ctx); - - hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ); - hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ); - hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ); - - /* - * This algorithm is a 3 stage state machine - */ - for (i = 0; i < 3; i++) { - - switch (i) { - case 0: - /* - * Start by encrypting the counter value - * This gives us an intermediate value I - */ - memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ); - output = ctx->I; - hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ); - break; - case 1: - - /* - * Next xor I with our secret vector V - * encrypt that result to obtain our - * pseudo random data which we output - */ - xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ); - hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ); - output = ctx->rand_data; - break; - case 2: - /* - * First check that we didn't produce the same - * random data that we did last time around through this - */ - if (!memcmp(ctx->rand_data, ctx->last_rand_data, - DEFAULT_BLK_SZ)) { - if (cont_test) { - panic("cprng %p Failed repetition check!\n", - ctx); - } - - printk(KERN_ERR - "ctx %p Failed repetition check!\n", - ctx); - - ctx->flags |= PRNG_NEED_RESET; - return -EINVAL; - } - memcpy(ctx->last_rand_data, ctx->rand_data, - DEFAULT_BLK_SZ); - - /* - * Lastly xor the random data with I - * and encrypt that to obtain a new secret vector V - */ - xor_vectors(ctx->rand_data, ctx->I, tmp, - DEFAULT_BLK_SZ); - output = ctx->V; - hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ); - break; - } - - - /* do the encryption */ - crypto_cipher_encrypt_one(ctx->tfm, output, tmp); - - } - - /* - * Now update our DT value - */ - for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) { - ctx->DT[i] += 1; - if (ctx->DT[i] != 0) - break; - } - - dbgprint("Returning new block for context %p\n", ctx); - ctx->rand_data_valid = 0; - - hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ); - hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ); - hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ); - hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ); - - return 0; -} - -/* Our exported functions */ -static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx, - int do_cont_test) -{ - unsigned char *ptr = buf; - unsigned int byte_count = (unsigned int)nbytes; - int err; - - - spin_lock_bh(&ctx->prng_lock); - - err = -EINVAL; - if (ctx->flags & PRNG_NEED_RESET) - goto done; - - /* - * If the FIXED_SIZE flag is on, only return whole blocks of - * pseudo random data - */ - err = -EINVAL; - if (ctx->flags & PRNG_FIXED_SIZE) { - if (nbytes < DEFAULT_BLK_SZ) - goto done; - byte_count = DEFAULT_BLK_SZ; - } - - /* - * Return 0 in case of success as mandated by the kernel - * 
crypto API interface definition. - */ - err = 0; - - dbgprint(KERN_CRIT "getting %d random bytes for context %p\n", - byte_count, ctx); - - -remainder: - if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { - memset(buf, 0, nbytes); - err = -EINVAL; - goto done; - } - } - - /* - * Copy any data less than an entire block - */ - if (byte_count < DEFAULT_BLK_SZ) { -empty_rbuf: - while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { - *ptr = ctx->rand_data[ctx->rand_data_valid]; - ptr++; - byte_count--; - ctx->rand_data_valid++; - if (byte_count == 0) - goto done; - } - } - - /* - * Now copy whole blocks - */ - for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { - if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { - memset(buf, 0, nbytes); - err = -EINVAL; - goto done; - } - } - if (ctx->rand_data_valid > 0) - goto empty_rbuf; - memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); - ctx->rand_data_valid += DEFAULT_BLK_SZ; - ptr += DEFAULT_BLK_SZ; - } - - /* - * Now go back and get any remaining partial block - */ - if (byte_count) - goto remainder; - -done: - spin_unlock_bh(&ctx->prng_lock); - dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", - err, ctx); - return err; -} - -static void free_prng_context(struct prng_context *ctx) -{ - crypto_free_cipher(ctx->tfm); -} - -static int reset_prng_context(struct prng_context *ctx, - const unsigned char *key, size_t klen, - const unsigned char *V, const unsigned char *DT) -{ - int ret; - const unsigned char *prng_key; - - spin_lock_bh(&ctx->prng_lock); - ctx->flags |= PRNG_NEED_RESET; - - prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; - - if (!key) - klen = DEFAULT_PRNG_KSZ; - - if (V) - memcpy(ctx->V, V, DEFAULT_BLK_SZ); - else - memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ); - - if (DT) - memcpy(ctx->DT, DT, DEFAULT_BLK_SZ); - else - memset(ctx->DT, 0, DEFAULT_BLK_SZ); - - memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); - memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); - - ctx->rand_data_valid = DEFAULT_BLK_SZ; - - ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); - if (ret) { - dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", - crypto_cipher_get_flags(ctx->tfm)); - goto out; - } - - ret = 0; - ctx->flags &= ~PRNG_NEED_RESET; -out: - spin_unlock_bh(&ctx->prng_lock); - return ret; -} - -static int cprng_init(struct crypto_tfm *tfm) -{ - struct prng_context *ctx = crypto_tfm_ctx(tfm); - - spin_lock_init(&ctx->prng_lock); - ctx->tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->tfm)) { - dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", - ctx); - return PTR_ERR(ctx->tfm); - } - - if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) - return -EINVAL; - - /* - * after allocation, we should always force the user to reset - * so they don't inadvertently use the insecure default values - * without specifying them intentionally - */ - ctx->flags |= PRNG_NEED_RESET; - return 0; -} - -static void cprng_exit(struct crypto_tfm *tfm) -{ - free_prng_context(crypto_tfm_ctx(tfm)); -} - -static int cprng_get_random(struct crypto_rng *tfm, - const u8 *src, unsigned int slen, - u8 *rdata, unsigned int dlen) -{ - struct prng_context *prng = crypto_rng_ctx(tfm); - - return get_prng_bytes(rdata, dlen, prng, 0); -} - -/* - * This is the cprng_registered reset method; the seed value is - * interpreted as the tuple { V KEY DT} - * V and KEY are required during reset, and DT is optional, detected - * as
being present by testing the length of the seed - */ -static int cprng_reset(struct crypto_rng *tfm, - const u8 *seed, unsigned int slen) -{ - struct prng_context *prng = crypto_rng_ctx(tfm); - const u8 *key = seed + DEFAULT_BLK_SZ; - const u8 *dt = NULL; - - if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) - return -EINVAL; - - if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ)) - dt = key + DEFAULT_PRNG_KSZ; - - reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt); - - if (prng->flags & PRNG_NEED_RESET) - return -EINVAL; - return 0; -} - -#ifdef CONFIG_CRYPTO_FIPS -static int fips_cprng_get_random(struct crypto_rng *tfm, - const u8 *src, unsigned int slen, - u8 *rdata, unsigned int dlen) -{ - struct prng_context *prng = crypto_rng_ctx(tfm); - - return get_prng_bytes(rdata, dlen, prng, 1); -} - -static int fips_cprng_reset(struct crypto_rng *tfm, - const u8 *seed, unsigned int slen) -{ - u8 rdata[DEFAULT_BLK_SZ]; - const u8 *key = seed + DEFAULT_BLK_SZ; - int rc; - - struct prng_context *prng = crypto_rng_ctx(tfm); - - if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) - return -EINVAL; - - /* fips strictly requires seed != key */ - if (!memcmp(seed, key, DEFAULT_PRNG_KSZ)) - return -EINVAL; - - rc = cprng_reset(tfm, seed, slen); - - if (!rc) - goto out; - - /* this primes our continuity test */ - rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0); - prng->rand_data_valid = DEFAULT_BLK_SZ; - -out: - return rc; -} -#endif - -static struct rng_alg rng_algs[] = { { - .generate = cprng_get_random, - .seed = cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, - .base = { - .cra_name = "stdrng", - .cra_driver_name = "ansi_cprng", - .cra_priority = 100, - .cra_ctxsize = sizeof(struct prng_context), - .cra_module = THIS_MODULE, - .cra_init = cprng_init, - .cra_exit = cprng_exit, - } -#ifdef CONFIG_CRYPTO_FIPS -}, { - .generate = fips_cprng_get_random, - .seed = fips_cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, - .base = { - .cra_name = "fips(ansi_cprng)", - .cra_driver_name = "fips_ansi_cprng", - .cra_priority = 300, - .cra_ctxsize = sizeof(struct prng_context), - .cra_module = THIS_MODULE, - .cra_init = cprng_init, - .cra_exit = cprng_exit, - } -#endif -} }; - -/* Module initialization */ -static int __init prng_mod_init(void) -{ - return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs)); -} - -static void __exit prng_mod_fini(void) -{ - crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs)); -} - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Software Pseudo Random Number Generator"); -MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); -module_param(dbg, int, 0); -MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); -subsys_initcall(prng_mod_init); -module_exit(prng_mod_fini); -MODULE_ALIAS_CRYPTO("stdrng"); -MODULE_ALIAS_CRYPTO("ansi_cprng"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/anubis.c b/crypto/anubis.c index 5da0241ef453..4b01b6ec961a 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -29,11 +29,11 @@ * */ +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> -#include <linux/crypto.h> +#include <linux/unaligned.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 @@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *key = (const __be32 *)in_key; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; @@ -482,7
+481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) - kappa[i] = be32_to_cpu(key[i]); + kappa[i] = get_unaligned_be32(&in_key[4 * i]); /* * generate R + 1 round keys: @@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], - u8 *ciphertext, const u8 *plaintext, const int R) + u8 *dst, const u8 *src, const int R) { - const __be32 *src = (const __be32 *)plaintext; - __be32 *dst = (__be32 *)ciphertext; int i, r; u32 state[4]; u32 inter[4]; @@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) - state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; + state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i]; /* * R - 1 full rounds: @@ -654,7 +651,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], */ for (i = 0; i < 4; i++) - dst[i] = cpu_to_be32(inter[i]); + put_unaligned_be32(inter[i], &dst[4 * i]); } static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_ctxsize = sizeof (struct anubis_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = ANUBIS_MIN_KEY_SIZE, @@ -687,10 +683,7 @@ static struct crypto_alg anubis_alg = { static int __init anubis_mod_init(void) { - int ret = 0; - - ret = crypto_register_alg(&anubis_alg); - return ret; + return crypto_register_alg(&anubis_alg); } static void __exit anubis_mod_fini(void) @@ -698,7 +691,7 @@ static void __exit anubis_mod_fini(void) crypto_unregister_alg(&anubis_alg); } -subsys_initcall(anubis_mod_init); +module_init(anubis_mod_init); module_exit(anubis_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/api.c b/crypto/api.c index c4eda56cff89..5724d62e9d07 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -12,6 +12,7 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/module.h> @@ -30,7 +31,14 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) +DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); +#endif + +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask); +static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, + u32 mask); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { @@ -47,11 +55,6 @@ void crypto_mod_put(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_mod_put); -static inline int crypto_is_test_larval(struct crypto_larval *larval) -{ - return larval->alg.cra_driver_name[0]; -} - static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask) { @@ -67,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, if ((q->cra_flags ^ type) & mask) continue; - if (crypto_is_larval(q) && - !crypto_is_test_larval((struct crypto_larval *)q) && - ((struct crypto_larval *)q)->mask != mask) - continue; - exact = !strcmp(q->cra_driver_name, name); fuzzy = !strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) @@ 
-110,12 +108,14 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) if (!larval) return ERR_PTR(-ENOMEM); + type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK); + larval->mask = mask; larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; larval->alg.cra_priority = -1; larval->alg.cra_destroy = crypto_larval_destroy; - strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); + strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); init_completion(&larval->completion); return larval; @@ -145,54 +145,114 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, if (alg != &larval->alg) { kfree(larval); if (crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); } return alg; } -void crypto_larval_kill(struct crypto_alg *alg) +static void crypto_larval_kill(struct crypto_larval *larval) { - struct crypto_larval *larval = (void *)alg; + bool unlinked; down_write(&crypto_alg_sem); - list_del(&alg->cra_list); + unlinked = list_empty(&larval->alg.cra_list); + if (!unlinked) + list_del_init(&larval->alg.cra_list); up_write(&crypto_alg_sem); + + if (unlinked) + return; + complete_all(&larval->completion); - crypto_alg_put(alg); + crypto_alg_put(&larval->alg); } -EXPORT_SYMBOL_GPL(crypto_larval_kill); -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) +void crypto_schedule_test(struct crypto_larval *larval) { - struct crypto_larval *larval = (void *)alg; - long timeout; + int err; + + err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); + WARN_ON_ONCE(err != NOTIFY_STOP); +} +EXPORT_SYMBOL_GPL(crypto_schedule_test); + +static void crypto_start_test(struct crypto_larval *larval) +{ + if (!crypto_is_test_larval(larval)) + return; + + if (larval->test_started) + return; + + down_write(&crypto_alg_sem); + if (larval->test_started) { + up_write(&crypto_alg_sem); + return; + } - timeout = wait_for_completion_killable_timeout( + larval->test_started = true; + up_write(&crypto_alg_sem); + + crypto_schedule_test(larval); +} + +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask) +{ + struct crypto_larval *larval; + long time_left; + +again: + larval = container_of(alg, struct crypto_larval, alg); + + if (!crypto_boot_test_finished()) + crypto_start_test(larval); + + time_left = wait_for_completion_killable_timeout( &larval->completion, 60 * HZ); alg = larval->adult; - if (timeout < 0) + if (time_left < 0) alg = ERR_PTR(-EINTR); - else if (!timeout) + else if (!time_left) { + if (crypto_is_test_larval(larval)) + crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); - else if (!alg) - alg = ERR_PTR(-ENOENT); - else if (IS_ERR(alg)) + } else if (!alg || PTR_ERR(alg) == -EEXIST) { + int err = alg ? -EEXIST : -EAGAIN; + + /* + * EEXIST is expected because two probes can be scheduled + * at the same time with one using alg_name and the other + * using driver_name. Do a re-lookup but do not retry in + * case we hit a quirk like gcm_base(ctr(aes),...) which + * will never match. 
+ */ + alg = &larval->alg; + alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: + ERR_PTR(err); + } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); + else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) + alg = ERR_PTR(-EAGAIN); else if (!crypto_mod_get(alg)) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); + if (!IS_ERR(alg) && crypto_is_larval(alg)) + goto again; + return alg; } static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) { + const u32 fips = CRYPTO_ALG_FIPS_INTERNAL; struct crypto_alg *alg; u32 test = 0; @@ -200,8 +260,20 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, test |= CRYPTO_ALG_TESTED; down_read(&crypto_alg_sem); - alg = __crypto_alg_lookup(name, type | test, mask | test); - if (!alg && test) { + alg = __crypto_alg_lookup(name, (type | test) & ~fips, + (mask | test) & ~fips); + if (alg) { + if (((type | mask) ^ fips) & fips) + mask |= fips; + mask &= fips; + + if (!crypto_is_larval(alg) && + ((type ^ alg->cra_flags) & mask)) { + /* Algorithm is disallowed in FIPS mode. */ + crypto_mod_put(alg); + alg = ERR_PTR(-ENOENT); + } + } else if (test) { alg = __crypto_alg_lookup(name, type, mask); if (alg && !crypto_is_larval(alg)) { /* Test failed */ @@ -237,9 +309,13 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, } if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); - else if (!alg) + alg = crypto_larval_wait(alg, type, mask); + else if (alg) + ; + else if (!(mask & CRYPTO_ALG_TESTED)) alg = crypto_larval_add(name, type, mask); + else + alg = ERR_PTR(-ENOENT); return alg; } @@ -266,7 +342,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) /* * If the internal flag is set for a cipher, require a caller to - * to invoke the cipher with the internal flag to use that cipher. + * invoke the cipher with the internal flag to use that cipher. * Also, if a caller wants to allocate a cipher that may or may * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and * !(mask & CRYPTO_ALG_INTERNAL). 
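In caller terms, the three cases described in that comment look like this (a sketch; crypto_alloc_shash() stands in for any allocation helper):

	struct crypto_shash *tfm;

	/* Ordinary caller: the core ORs CRYPTO_ALG_INTERNAL into the mask,
	 * so an internal-only implementation can never be selected. */
	tfm = crypto_alloc_shash("sha256", 0, 0);

	/* Caller that accepts either an internal or a normal implementation. */
	tfm = crypto_alloc_shash("sha256", CRYPTO_ALG_INTERNAL, 0);

	/* Caller that requires an internal implementation. */
	tfm = crypto_alloc_shash("sha256", CRYPTO_ALG_INTERNAL,
				 CRYPTO_ALG_INTERNAL);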
@@ -281,25 +357,16 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); if (ok == NOTIFY_STOP) - alg = crypto_larval_wait(larval); + alg = crypto_larval_wait(larval, type, mask); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); } - crypto_larval_kill(larval); + crypto_larval_kill(container_of(larval, struct crypto_larval, alg)); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); -static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) -{ - const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; - - if (type_obj) - return type_obj->init(tfm, type, mask); - return 0; -} - static void crypto_exit_ops(struct crypto_tfm *tfm) { const struct crypto_type *type = tfm->__crt_alg->cra_type; @@ -324,10 +391,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) case CRYPTO_ALG_TYPE_CIPHER: len += crypto_cipher_ctxsize(alg); break; - - case CRYPTO_ALG_TYPE_COMPRESS: - len += crypto_compress_ctxsize(alg); - break; } return len; @@ -341,23 +404,20 @@ void crypto_shoot_alg(struct crypto_alg *alg) } EXPORT_SYMBOL_GPL(crypto_shoot_alg); -struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, - u32 mask) +struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, + u32 mask, gfp_t gfp) { - struct crypto_tfm *tfm = NULL; + struct crypto_tfm *tfm; unsigned int tfm_size; int err = -ENOMEM; tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); - tfm = kzalloc(tfm_size, GFP_KERNEL); + tfm = kzalloc(tfm_size, gfp); if (tfm == NULL) goto out_err; tfm->__crt_alg = alg; - - err = crypto_init_ops(tfm, type, mask); - if (err) - goto out_free_tfm; + refcount_set(&tfm->refcnt, 1); if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; @@ -366,7 +426,6 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, cra_init_failed: crypto_exit_ops(tfm); -out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(tfm); @@ -375,6 +434,13 @@ out_err: out: return tfm; } +EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp); + +struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL); +} EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); /* @@ -433,26 +499,44 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm_node(struct crypto_alg *alg, - const struct crypto_type *frontend, - int node) +static void *crypto_alloc_tfmmem(struct crypto_alg *alg, + const struct crypto_type *frontend, int node, + gfp_t gfp) { - char *mem; - struct crypto_tfm *tfm = NULL; + struct crypto_tfm *tfm; unsigned int tfmsize; unsigned int total; - int err = -ENOMEM; + char *mem; tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc_node(total, GFP_KERNEL, node); + mem = kzalloc_node(total, gfp, node); if (mem == NULL) - goto out_err; + return ERR_PTR(-ENOMEM); tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; tfm->node = node; + refcount_set(&tfm->refcnt, 1); + + return mem; +} + +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) +{ + struct crypto_tfm *tfm; + char *mem; + int err; + + mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL); + if (IS_ERR(mem)) + goto out; + + tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); + tfm->fb = tfm; err = frontend->init_tfm(tfm); if (err) @@ -469,13 +553,38 @@ out_free_tfm: 
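For context, crypto_req_done(), whose callback signature changes to a bare data pointer further down this file, is the completion half of the kernel's standard synchronous-wait idiom. A minimal caller-side sketch, assuming an already-initialised skcipher request:

	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* crypto_req_done() completes &wait; crypto_wait_req() turns
	 * -EINPROGRESS/-EBUSY into a sleep until that completion fires. */
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);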
if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(mem); -out_err: mem = ERR_PTR(err); out: return mem; } EXPORT_SYMBOL_GPL(crypto_create_tfm_node); +void *crypto_clone_tfm(const struct crypto_type *frontend, + struct crypto_tfm *otfm) +{ + struct crypto_alg *alg = otfm->__crt_alg; + struct crypto_tfm *tfm; + char *mem; + + mem = ERR_PTR(-ESTALE); + if (unlikely(!crypto_mod_get(alg))) + goto out; + + mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC); + if (IS_ERR(mem)) { + crypto_mod_put(alg); + goto out; + } + + tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); + tfm->crt_flags = otfm->crt_flags; + tfm->fb = tfm; + +out: + return mem; +} +EXPORT_SYMBOL_GPL(crypto_clone_tfm); + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) @@ -565,6 +674,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) if (IS_ERR_OR_NULL(mem)) return; + if (!refcount_dec_and_test(&tfm->refcnt)) + return; alg = tfm->__crt_alg; if (!tfm->exit && alg->cra_exit) @@ -589,9 +700,9 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); -void crypto_req_done(struct crypto_async_request *req, int err) +void crypto_req_done(void *data, int err) { - struct crypto_wait *wait = req->data; + struct crypto_wait *wait = data; if (err == -EINPROGRESS) return; @@ -601,6 +712,31 @@ void crypto_req_done(struct crypto_async_request *req, int err) } EXPORT_SYMBOL_GPL(crypto_req_done); +void crypto_destroy_alg(struct crypto_alg *alg) +{ + if (alg->cra_type && alg->cra_type->destroy) + alg->cra_type->destroy(alg); + if (alg->cra_destroy) + alg->cra_destroy(alg); +} +EXPORT_SYMBOL_GPL(crypto_destroy_alg); + +struct crypto_async_request *crypto_request_clone( + struct crypto_async_request *req, size_t total, gfp_t gfp) +{ + struct crypto_tfm *tfm = req->tfm; + struct crypto_async_request *nreq; + + nreq = kmemdup(req, total, gfp); + if (!nreq) { + req->tfm = tfm->fb; + return req; + } + + nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK; + return nreq; +} +EXPORT_SYMBOL_GPL(crypto_request_clone); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); -MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/arc4.c b/crypto/arc4.c index 3254dcc34368..1608018111d0 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -7,7 +7,6 @@ * Jon Oberheide <jon@oberheide.org> */ -#include <crypto/algapi.h> #include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <linux/init.h> @@ -15,33 +14,31 @@ #include <linux/module.h> #include <linux/sched.h> -static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, +#define ARC4_ALIGN __alignof__(struct arc4_ctx) + +static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); return arc4_setkey(ctx, in_key, key_len); } -static int crypto_arc4_crypt(struct skcipher_request *req) +static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned nbytes, u8 *siv, u32 flags) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - int err; + struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); - err = skcipher_walk_virt(&walk, req, false); + if (!(flags & CRYPTO_LSKCIPHER_FLAG_CONT)) + memcpy(siv, ctx, sizeof(*ctx)); - while (walk.nbytes > 0) { - arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr, 
- walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } + ctx = (struct arc4_ctx *)siv; - return err; + arc4_crypt(ctx, dst, src, nbytes); + return 0; } -static int crypto_arc4_init(struct crypto_skcipher *tfm) +static int crypto_arc4_init(struct crypto_lskcipher *tfm) { pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n", current->comm, (unsigned long)current->pid); @@ -49,36 +46,34 @@ static int crypto_arc4_init(struct crypto_skcipher *tfm) return 0; } -static struct skcipher_alg arc4_alg = { - /* - * For legacy reasons, this is named "ecb(arc4)", not "arc4". - * Nevertheless it's actually a stream cipher, not a block cipher. - */ - .base.cra_name = "ecb(arc4)", - .base.cra_driver_name = "ecb(arc4)-generic", - .base.cra_priority = 100, - .base.cra_blocksize = ARC4_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct arc4_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - .setkey = crypto_arc4_setkey, - .encrypt = crypto_arc4_crypt, - .decrypt = crypto_arc4_crypt, - .init = crypto_arc4_init, +static struct lskcipher_alg arc4_alg = { + .co.base.cra_name = "arc4", + .co.base.cra_driver_name = "arc4-generic", + .co.base.cra_priority = 100, + .co.base.cra_blocksize = ARC4_BLOCK_SIZE, + .co.base.cra_ctxsize = sizeof(struct arc4_ctx), + .co.base.cra_alignmask = ARC4_ALIGN - 1, + .co.base.cra_module = THIS_MODULE, + .co.min_keysize = ARC4_MIN_KEY_SIZE, + .co.max_keysize = ARC4_MAX_KEY_SIZE, + .co.statesize = sizeof(struct arc4_ctx), + .setkey = crypto_arc4_setkey, + .encrypt = crypto_arc4_crypt, + .decrypt = crypto_arc4_crypt, + .init = crypto_arc4_init, }; static int __init arc4_init(void) { - return crypto_register_skcipher(&arc4_alg); + return crypto_register_lskcipher(&arc4_alg); } static void __exit arc4_exit(void) { - crypto_unregister_skcipher(&arc4_alg); + crypto_unregister_lskcipher(&arc4_alg); } -subsys_initcall(arc4_init); +module_init(arc4_init); module_exit(arc4_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c new file mode 100644 index 000000000000..faa7900383f6 --- /dev/null +++ b/crypto/aria_generic.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above. 
+ */ + +#include <crypto/aria.h> +#include <linux/unaligned.h> + +static const u32 key_rc[20] = { + 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, + 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0, + 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e, + 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, + 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 +}; + +static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, + unsigned int key_len) +{ + u32 w0[4], w1[4], w2[4], w3[4]; + u32 reg0, reg1, reg2, reg3; + const u32 *ck; + int rkidx = 0; + + ck = &key_rc[(key_len - 16) / 2]; + + w0[0] = get_unaligned_be32(&in_key[0]); + w0[1] = get_unaligned_be32(&in_key[4]); + w0[2] = get_unaligned_be32(&in_key[8]); + w0[3] = get_unaligned_be32(&in_key[12]); + + reg0 = w0[0] ^ ck[0]; + reg1 = w0[1] ^ ck[1]; + reg2 = w0[2] ^ ck[2]; + reg3 = w0[3] ^ ck[3]; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + + if (key_len > 16) { + w1[0] = get_unaligned_be32(&in_key[16]); + w1[1] = get_unaligned_be32(&in_key[20]); + if (key_len > 24) { + w1[2] = get_unaligned_be32(&in_key[24]); + w1[3] = get_unaligned_be32(&in_key[28]); + } else { + w1[2] = 0; + w1[3] = 0; + } + } else { + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + } + + w1[0] ^= reg0; + w1[1] ^= reg1; + w1[2] ^= reg2; + w1[3] ^= reg3; + + reg0 = w1[0]; + reg1 = w1[1]; + reg2 = w1[2]; + reg3 = w1[3]; + + reg0 ^= ck[4]; + reg1 ^= ck[5]; + reg2 ^= ck[6]; + reg3 ^= ck[7]; + + aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3); + + reg0 ^= w0[0]; + reg1 ^= w0[1]; + reg2 ^= w0[2]; + reg3 ^= w0[3]; + + w2[0] = reg0; + w2[1] = reg1; + w2[2] = reg2; + w2[3] = reg3; + + reg0 ^= ck[8]; + reg1 ^= ck[9]; + reg2 ^= ck[10]; + reg3 ^= ck[11]; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + + w3[0] = reg0 ^ w1[0]; + w3[1] = reg1 ^ w1[1]; + w3[2] = reg2 ^ w1[2]; + w3[3] = reg3 ^ w1[3]; + + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97); + if (key_len > 16) { + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97); + + if (key_len > 24) { + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109); + } + } +} + +static void aria_set_decrypt_key(struct aria_ctx *ctx) +{ + int i; + + for (i = 0; i < 4; i++) { + ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i]; + ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i]; + } + + for (i = 1; i < ctx->rounds; i++) { + ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]); + ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]); + ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]); + ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]); + + aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + aria_diff_byte(&ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); +
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + } +} + +int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + if (key_len != 16 && key_len != 24 && key_len != 32) + return -EINVAL; + + BUILD_BUG_ON(sizeof(ctx->enc_key) != 272); + BUILD_BUG_ON(sizeof(ctx->dec_key) != 272); + BUILD_BUG_ON(sizeof(int) != sizeof(ctx->rounds)); + + ctx->key_length = key_len; + ctx->rounds = (key_len + 32) / 4; + + aria_set_encrypt_key(ctx, in_key, key_len); + aria_set_decrypt_key(ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(aria_set_key); + +static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, + u32 key[][ARIA_RD_KEY_WORDS]) +{ + u32 reg0, reg1, reg2, reg3; + int rounds, rkidx = 0; + + rounds = ctx->rounds; + + reg0 = get_unaligned_be32(&in[0]); + reg1 = get_unaligned_be32(&in[4]); + reg2 = get_unaligned_be32(&in[8]); + reg3 = get_unaligned_be32(&in[12]); + + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + + while ((rounds -= 2) > 0) { + aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3); + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + + aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); + aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); + rkidx++; + } + + reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]), + (u8)(x2[get_u8(reg0, 1)] >> 8), + (u8)(s1[get_u8(reg0, 2)]), + (u8)(s2[get_u8(reg0, 3)])); + reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]), + (u8)(x2[get_u8(reg1, 1)] >> 8), + (u8)(s1[get_u8(reg1, 2)]), + (u8)(s2[get_u8(reg1, 3)])); + reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]), + (u8)(x2[get_u8(reg2, 1)] >> 8), + (u8)(s1[get_u8(reg2, 2)]), + (u8)(s2[get_u8(reg2, 3)])); + reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]), + (u8)(x2[get_u8(reg3, 1)] >> 8), + (u8)(s1[get_u8(reg3, 2)]), + (u8)(s2[get_u8(reg3, 3)])); + + put_unaligned_be32(reg0, &out[0]); + put_unaligned_be32(reg1, &out[4]); + put_unaligned_be32(reg2, &out[8]); + put_unaligned_be32(reg3, &out[12]); +} + +void aria_encrypt(void *_ctx, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = (struct aria_ctx *)_ctx; + + __aria_crypt(ctx, out, in, ctx->enc_key); +} +EXPORT_SYMBOL_GPL(aria_encrypt); + +void aria_decrypt(void *_ctx, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = (struct aria_ctx *)_ctx; + + __aria_crypt(ctx, out, in, ctx->dec_key); +} +EXPORT_SYMBOL_GPL(aria_decrypt); + +static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + __aria_crypt(ctx, out, in, ctx->enc_key); +} + +static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + __aria_crypt(ctx, out, in, ctx->dec_key); +} + +static struct crypto_alg aria_alg = { + .cra_name = "aria", + .cra_driver_name = "aria-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = ARIA_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aria_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = ARIA_MIN_KEY_SIZE, + .cia_max_keysize = ARIA_MAX_KEY_SIZE, + .cia_setkey = aria_set_key, + .cia_encrypt = __aria_encrypt, + .cia_decrypt = __aria_decrypt + } + } +}; + +static int __init aria_init(void) +{ + return crypto_register_alg(&aria_alg); +} + +static void __exit aria_fini(void) +{ + crypto_unregister_alg(&aria_alg); +} +
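A note on the schedule above: aria_set_key() fixes ctx->rounds at (key_len + 32) / 4, i.e. 12, 14 or 16 rounds for 16-, 24- and 32-byte keys, matching RFC 5794. A minimal kernel-side sketch of driving the single-block cipher registered below (placeholder all-zero key, error handling trimmed, assuming the bare cipher API is reachable from the caller):

	struct crypto_cipher *tfm;
	u8 key[16] = { 0 };	/* hypothetical 128-bit key */
	u8 in[ARIA_BLOCK_SIZE] = { 0 }, out[ARIA_BLOCK_SIZE];

	tfm = crypto_alloc_cipher("aria", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_cipher_setkey(tfm, key, sizeof(key));	/* aria_set_key() */
	crypto_cipher_encrypt_one(tfm, out, in);	/* __aria_encrypt() */
	crypto_cipher_decrypt_one(tfm, in, out);	/* __aria_decrypt() */
	crypto_free_cipher(tfm);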
+module_init(aria_init); +module_exit(aria_fini); + +MODULE_DESCRIPTION("ARIA Cipher Algorithm"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>"); +MODULE_ALIAS_CRYPTO("aria"); +MODULE_ALIAS_CRYPTO("aria-generic"); diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 1f1f004dc757..e1345b8f39f1 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE select MPILIB select CRYPTO_HASH_INFO select CRYPTO_AKCIPHER + select CRYPTO_SIG select CRYPTO_HASH help This option provides support for asymmetric public key type handling. @@ -22,18 +23,6 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE appropriate hash algorithms (such as SHA-1) must be available. ENOPKG will be reported if the requisite algorithm is unavailable. -config ASYMMETRIC_TPM_KEY_SUBTYPE - tristate "Asymmetric TPM backed private key subtype" - depends on TCG_TPM - depends on TRUSTED_KEYS - select CRYPTO_HMAC - select CRYPTO_SHA1 - select CRYPTO_HASH_INFO - help - This option provides support for TPM backed private key type handling. - Operations such as sign, verify, encrypt, decrypt are performed by - the TPM after the private key is loaded. - config X509_CERTIFICATE_PARSER tristate "X.509 certificate parser" depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE @@ -54,15 +43,6 @@ config PKCS8_PRIVATE_KEY_PARSER private key data and provides the ability to instantiate a crypto key from that data. -config TPM_KEY_PARSER - tristate "TPM private key parser" - depends on ASYMMETRIC_TPM_KEY_SUBTYPE - select ASN1 - help - This option provides support for parsing TPM format blobs for - private key data and provides the ability to instantiate a crypto key - from that data. - config PKCS7_MESSAGE_PARSER tristate "PKCS#7 message parser" depends on X509_CERTIFICATE_PARSER @@ -96,4 +76,31 @@ config SIGNED_PE_FILE_VERIFICATION This option provides support for verifying the signature(s) on a signed PE binary. +config FIPS_SIGNATURE_SELFTEST + tristate "Run FIPS selftests on the X.509+PKCS7 signature verification" + help + This option causes some selftests to be run on the signature + verification code, using some built-in data. This is required + for FIPS.
+ depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER + depends on X509_CERTIFICATE_PARSER + depends on CRYPTO_RSA + depends on CRYPTO_SHA256 + +config FIPS_SIGNATURE_SELFTEST_RSA + bool + default y + depends on FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_RSA=y || CRYPTO_RSA=FIPS_SIGNATURE_SELFTEST + +config FIPS_SIGNATURE_SELFTEST_ECDSA + bool + default y + depends on FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_ECDSA=y || CRYPTO_ECDSA=FIPS_SIGNATURE_SELFTEST + endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index 28b91adba2ae..bc65d3b98dcb 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -11,7 +11,6 @@ asymmetric_keys-y := \ signature.o obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o -obj-$(CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE) += asym_tpm.o # # X.509 Certificate handling @@ -21,7 +20,12 @@ x509_key_parser-y := \ x509.asn1.o \ x509_akid.asn1.o \ x509_cert_parser.o \ + x509_loader.o \ x509_public_key.o +obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o +x509_selftest-y += selftest.o +x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_RSA) += selftest_rsa.o +x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA) += selftest_ecdsa.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ @@ -75,14 +79,3 @@ verify_signed_pefile-y := \ $(obj)/mscode_parser.o: $(obj)/mscode.asn1.h $(obj)/mscode.asn1.h $(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h - -# -# TPM private key parsing -# -obj-$(CONFIG_TPM_KEY_PARSER) += tpm_key_parser.o -tpm_key_parser-y := \ - tpm.asn1.o \ - tpm_parser.o - -$(obj)/tpm_parser.o: $(obj)/tpm.asn1.h -$(obj)/tpm.asn1.o: $(obj)/tpm.asn1.c $(obj)/tpm.asn1.h diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c deleted file mode 100644 index 0959613560b9..000000000000 --- a/crypto/asymmetric_keys/asym_tpm.c +++ /dev/null @@ -1,957 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#define pr_fmt(fmt) "ASYM-TPM: "fmt -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/seq_file.h> -#include <linux/scatterlist.h> -#include <linux/tpm.h> -#include <linux/tpm_command.h> -#include <crypto/akcipher.h> -#include <crypto/hash.h> -#include <crypto/sha1.h> -#include <asm/unaligned.h> -#include <keys/asymmetric-subtype.h> -#include <keys/trusted_tpm.h> -#include <crypto/asym_tpm_subtype.h> -#include <crypto/public_key.h> - -#define TPM_ORD_FLUSHSPECIFIC 186 -#define TPM_ORD_LOADKEY2 65 -#define TPM_ORD_UNBIND 30 -#define TPM_ORD_SIGN 60 - -#define TPM_RT_KEY 0x00000001 - -/* - * Load a TPM key from the blob provided by userspace - */ -static int tpm_loadkey2(struct tpm_buf *tb, - uint32_t keyhandle, unsigned char *keyauth, - const unsigned char *keyblob, int keybloblen, - uint32_t *newhandle) -{ - unsigned char nonceodd[TPM_NONCE_SIZE]; - unsigned char enonce[TPM_NONCE_SIZE]; - unsigned char authdata[SHA1_DIGEST_SIZE]; - uint32_t authhandle = 0; - unsigned char cont = 0; - uint32_t ordinal; - int ret; - - ordinal = htonl(TPM_ORD_LOADKEY2); - - /* session for loading the key */ - ret = oiap(tb, &authhandle, enonce); - if (ret < 0) { - pr_info("oiap failed (%d)\n", ret); - return ret; - } - - /* generate odd nonce */ - ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); - if (ret < 0) 
{ - pr_info("tpm_get_random failed (%d)\n", ret); - return ret; - } - - /* calculate authorization HMAC value */ - ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, - nonceodd, cont, sizeof(uint32_t), &ordinal, - keybloblen, keyblob, 0, 0); - if (ret < 0) - return ret; - - /* build the request buffer */ - tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_LOADKEY2); - tpm_buf_append_u32(tb, keyhandle); - tpm_buf_append(tb, keyblob, keybloblen); - tpm_buf_append_u32(tb, authhandle); - tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE); - tpm_buf_append_u8(tb, cont); - tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE); - - ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); - if (ret < 0) { - pr_info("authhmac failed (%d)\n", ret); - return ret; - } - - ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, keyauth, - SHA1_DIGEST_SIZE, 0, 0); - if (ret < 0) { - pr_info("TSS_checkhmac1 failed (%d)\n", ret); - return ret; - } - - *newhandle = LOAD32(tb->data, TPM_DATA_OFFSET); - return 0; -} - -/* - * Execute the FlushSpecific TPM command - */ -static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle) -{ - tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_FLUSHSPECIFIC); - tpm_buf_append_u32(tb, handle); - tpm_buf_append_u32(tb, TPM_RT_KEY); - - return trusted_tpm_send(tb->data, MAX_BUF_SIZE); -} - -/* - * Decrypt a blob provided by userspace using a specific key handle. - * The handle is a well known handle or previously loaded by e.g. LoadKey2 - */ -static int tpm_unbind(struct tpm_buf *tb, - uint32_t keyhandle, unsigned char *keyauth, - const unsigned char *blob, uint32_t bloblen, - void *out, uint32_t outlen) -{ - unsigned char nonceodd[TPM_NONCE_SIZE]; - unsigned char enonce[TPM_NONCE_SIZE]; - unsigned char authdata[SHA1_DIGEST_SIZE]; - uint32_t authhandle = 0; - unsigned char cont = 0; - uint32_t ordinal; - uint32_t datalen; - int ret; - - ordinal = htonl(TPM_ORD_UNBIND); - datalen = htonl(bloblen); - - /* session for loading the key */ - ret = oiap(tb, &authhandle, enonce); - if (ret < 0) { - pr_info("oiap failed (%d)\n", ret); - return ret; - } - - /* generate odd nonce */ - ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); - if (ret < 0) { - pr_info("tpm_get_random failed (%d)\n", ret); - return ret; - } - - /* calculate authorization HMAC value */ - ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, - nonceodd, cont, sizeof(uint32_t), &ordinal, - sizeof(uint32_t), &datalen, - bloblen, blob, 0, 0); - if (ret < 0) - return ret; - - /* build the request buffer */ - tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_UNBIND); - tpm_buf_append_u32(tb, keyhandle); - tpm_buf_append_u32(tb, bloblen); - tpm_buf_append(tb, blob, bloblen); - tpm_buf_append_u32(tb, authhandle); - tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE); - tpm_buf_append_u8(tb, cont); - tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE); - - ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); - if (ret < 0) { - pr_info("authhmac failed (%d)\n", ret); - return ret; - } - - datalen = LOAD32(tb->data, TPM_DATA_OFFSET); - - ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, - keyauth, SHA1_DIGEST_SIZE, - sizeof(uint32_t), TPM_DATA_OFFSET, - datalen, TPM_DATA_OFFSET + sizeof(uint32_t), - 0, 0); - if (ret < 0) { - pr_info("TSS_checkhmac1 failed (%d)\n", ret); - return ret; - } - - memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), - min(outlen, datalen)); - - return datalen; -} - -/* - * Sign a blob provided by userspace (that has had the hash function applied) - * using a specific key handle. 
The handle is assumed to have been previously - * loaded by e.g. LoadKey2. - * - * Note that the key signature scheme of the used key should be set to - * TPM_SS_RSASSAPKCS1v15_DER. This allows the hashed input to be of any size - * up to key_length_in_bytes - 11 and not be limited to size 20 like the - * TPM_SS_RSASSAPKCS1v15_SHA1 signature scheme. - */ -static int tpm_sign(struct tpm_buf *tb, - uint32_t keyhandle, unsigned char *keyauth, - const unsigned char *blob, uint32_t bloblen, - void *out, uint32_t outlen) -{ - unsigned char nonceodd[TPM_NONCE_SIZE]; - unsigned char enonce[TPM_NONCE_SIZE]; - unsigned char authdata[SHA1_DIGEST_SIZE]; - uint32_t authhandle = 0; - unsigned char cont = 0; - uint32_t ordinal; - uint32_t datalen; - int ret; - - ordinal = htonl(TPM_ORD_SIGN); - datalen = htonl(bloblen); - - /* session for loading the key */ - ret = oiap(tb, &authhandle, enonce); - if (ret < 0) { - pr_info("oiap failed (%d)\n", ret); - return ret; - } - - /* generate odd nonce */ - ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); - if (ret < 0) { - pr_info("tpm_get_random failed (%d)\n", ret); - return ret; - } - - /* calculate authorization HMAC value */ - ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, - nonceodd, cont, sizeof(uint32_t), &ordinal, - sizeof(uint32_t), &datalen, - bloblen, blob, 0, 0); - if (ret < 0) - return ret; - - /* build the request buffer */ - tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SIGN); - tpm_buf_append_u32(tb, keyhandle); - tpm_buf_append_u32(tb, bloblen); - tpm_buf_append(tb, blob, bloblen); - tpm_buf_append_u32(tb, authhandle); - tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE); - tpm_buf_append_u8(tb, cont); - tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE); - - ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); - if (ret < 0) { - pr_info("authhmac failed (%d)\n", ret); - return ret; - } - - datalen = LOAD32(tb->data, TPM_DATA_OFFSET); - - ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, - keyauth, SHA1_DIGEST_SIZE, - sizeof(uint32_t), TPM_DATA_OFFSET, - datalen, TPM_DATA_OFFSET + sizeof(uint32_t), - 0, 0); - if (ret < 0) { - pr_info("TSS_checkhmac1 failed (%d)\n", ret); - return ret; - } - - memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), - min(datalen, outlen)); - - return datalen; -} - -/* Room to fit two u32 zeros for algo id and parameters length. */ -#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2) - -/* - * Maximum buffer size for the BER/DER encoded public key. The public key - * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048 - * bit key and e is usually 65537 - * The encoding overhead is: - * - max 4 bytes for SEQUENCE - * - max 4 bytes for INTEGER n type/length - * - 257 bytes of n - * - max 2 bytes for INTEGER e type/length - * - 3 bytes of e - * - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE) - */ -#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE) - -/* - * Provide a part of a description of the key for /proc/keys. 
- */ -static void asym_tpm_describe(const struct key *asymmetric_key, - struct seq_file *m) -{ - struct tpm_key *tk = asymmetric_key->payload.data[asym_crypto]; - - if (!tk) - return; - - seq_printf(m, "TPM1.2/Blob"); -} - -static void asym_tpm_destroy(void *payload0, void *payload3) -{ - struct tpm_key *tk = payload0; - - if (!tk) - return; - - kfree(tk->blob); - tk->blob_len = 0; - - kfree(tk); -} - -/* How many bytes will it take to encode the length */ -static inline uint32_t definite_length(uint32_t len) -{ - if (len <= 127) - return 1; - if (len <= 255) - return 2; - return 3; -} - -static inline uint8_t *encode_tag_length(uint8_t *buf, uint8_t tag, - uint32_t len) -{ - *buf++ = tag; - - if (len <= 127) { - buf[0] = len; - return buf + 1; - } - - if (len <= 255) { - buf[0] = 0x81; - buf[1] = len; - return buf + 2; - } - - buf[0] = 0x82; - put_unaligned_be16(len, buf + 1); - return buf + 3; -} - -static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf) -{ - uint8_t *cur = buf; - uint32_t n_len = definite_length(len) + 1 + len + 1; - uint32_t e_len = definite_length(3) + 1 + 3; - uint8_t e[3] = { 0x01, 0x00, 0x01 }; - - /* SEQUENCE */ - cur = encode_tag_length(cur, 0x30, n_len + e_len); - /* INTEGER n */ - cur = encode_tag_length(cur, 0x02, len + 1); - cur[0] = 0x00; - memcpy(cur + 1, pub_key, len); - cur += len + 1; - cur = encode_tag_length(cur, 0x02, sizeof(e)); - memcpy(cur, e, sizeof(e)); - cur += sizeof(e); - /* Zero parameters to satisfy set_pub_key ABI. */ - memzero_explicit(cur, SETKEY_PARAMS_SIZE); - - return cur - buf; -} - -/* - * Determine the crypto algorithm name. - */ -static int determine_akcipher(const char *encoding, const char *hash_algo, - char alg_name[CRYPTO_MAX_ALG_NAME]) -{ - if (strcmp(encoding, "pkcs1") == 0) { - if (!hash_algo) { - strcpy(alg_name, "pkcs1pad(rsa)"); - return 0; - } - - if (snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(rsa,%s)", - hash_algo) >= CRYPTO_MAX_ALG_NAME) - return -EINVAL; - - return 0; - } - - if (strcmp(encoding, "raw") == 0) { - strcpy(alg_name, "rsa"); - return 0; - } - - return -ENOPKG; -} - -/* - * Query information about a key. - */ -static int tpm_key_query(const struct kernel_pkey_params *params, - struct kernel_pkey_query *info) -{ - struct tpm_key *tk = params->key->payload.data[asym_crypto]; - int ret; - char alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_akcipher *tfm; - uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; - uint32_t der_pub_key_len; - int len; - - /* TPM only works on private keys, public keys still done in software */ - ret = determine_akcipher(params->encoding, params->hash_algo, alg_name); - if (ret < 0) - return ret; - - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, - der_pub_key); - - ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); - if (ret < 0) - goto error_free_tfm; - - len = crypto_akcipher_maxsize(tfm); - - info->key_size = tk->key_len; - info->max_data_size = tk->key_len / 8; - info->max_sig_size = len; - info->max_enc_size = len; - info->max_dec_size = tk->key_len / 8; - - info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT | - KEYCTL_SUPPORTS_DECRYPT | - KEYCTL_SUPPORTS_VERIFY | - KEYCTL_SUPPORTS_SIGN; - - ret = 0; -error_free_tfm: - crypto_free_akcipher(tfm); - pr_devel("<==%s() = %d\n", __func__, ret); - return ret; -} - -/* - * Encryption operation is performed with the public key. 
Hence it is done - * in software - */ -static int tpm_key_encrypt(struct tpm_key *tk, - struct kernel_pkey_params *params, - const void *in, void *out) -{ - char alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_akcipher *tfm; - struct akcipher_request *req; - struct crypto_wait cwait; - struct scatterlist in_sg, out_sg; - uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; - uint32_t der_pub_key_len; - int ret; - - pr_devel("==>%s()\n", __func__); - - ret = determine_akcipher(params->encoding, params->hash_algo, alg_name); - if (ret < 0) - return ret; - - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, - der_pub_key); - - ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); - if (ret < 0) - goto error_free_tfm; - - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - - sg_init_one(&in_sg, in, params->in_len); - sg_init_one(&out_sg, out, params->out_len); - akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, - params->out_len); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); - - ret = crypto_akcipher_encrypt(req); - ret = crypto_wait_req(ret, &cwait); - - if (ret == 0) - ret = req->dst_len; - - akcipher_request_free(req); -error_free_tfm: - crypto_free_akcipher(tfm); - pr_devel("<==%s() = %d\n", __func__, ret); - return ret; -} - -/* - * Decryption operation is performed with the private key in the TPM. - */ -static int tpm_key_decrypt(struct tpm_key *tk, - struct kernel_pkey_params *params, - const void *in, void *out) -{ - struct tpm_buf tb; - uint32_t keyhandle; - uint8_t srkauth[SHA1_DIGEST_SIZE]; - uint8_t keyauth[SHA1_DIGEST_SIZE]; - int r; - - pr_devel("==>%s()\n", __func__); - - if (params->hash_algo) - return -ENOPKG; - - if (strcmp(params->encoding, "pkcs1")) - return -ENOPKG; - - r = tpm_buf_init(&tb, 0, 0); - if (r) - return r; - - /* TODO: Handle a non-all zero SRK authorization */ - memset(srkauth, 0, sizeof(srkauth)); - - r = tpm_loadkey2(&tb, SRKHANDLE, srkauth, - tk->blob, tk->blob_len, &keyhandle); - if (r < 0) { - pr_devel("loadkey2 failed (%d)\n", r); - goto error; - } - - /* TODO: Handle a non-all zero key authorization */ - memset(keyauth, 0, sizeof(keyauth)); - - r = tpm_unbind(&tb, keyhandle, keyauth, - in, params->in_len, out, params->out_len); - if (r < 0) - pr_devel("tpm_unbind failed (%d)\n", r); - - if (tpm_flushspecific(&tb, keyhandle) < 0) - pr_devel("flushspecific failed (%d)\n", r); - -error: - tpm_buf_destroy(&tb); - pr_devel("<==%s() = %d\n", __func__, r); - return r; -} - -/* - * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2]. 
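(For scale: digest_info_sha256 below is a 19-byte prefix, so tpm_key_sign() assembles a 19 + 32 = 51 byte PKCS#1 input for a SHA-256 digest, which must fit in the key_len / 8 - 11 bytes that its EOVERFLOW check allows.)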
- */ -static const u8 digest_info_md5[] = { - 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, - 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */ - 0x05, 0x00, 0x04, 0x10 -}; - -static const u8 digest_info_sha1[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x0e, 0x03, 0x02, 0x1a, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 digest_info_rmd160[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x24, 0x03, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 digest_info_sha224[] = { - 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, - 0x05, 0x00, 0x04, 0x1c -}; - -static const u8 digest_info_sha256[] = { - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x20 -}; - -static const u8 digest_info_sha384[] = { - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, - 0x05, 0x00, 0x04, 0x30 -}; - -static const u8 digest_info_sha512[] = { - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, - 0x05, 0x00, 0x04, 0x40 -}; - -static const struct asn1_template { - const char *name; - const u8 *data; - size_t size; -} asn1_templates[] = { -#define _(X) { #X, digest_info_##X, sizeof(digest_info_##X) } - _(md5), - _(sha1), - _(rmd160), - _(sha256), - _(sha384), - _(sha512), - _(sha224), - { NULL } -#undef _ -}; - -static const struct asn1_template *lookup_asn1(const char *name) -{ - const struct asn1_template *p; - - for (p = asn1_templates; p->name; p++) - if (strcmp(name, p->name) == 0) - return p; - return NULL; -} - -/* - * Sign operation is performed with the private key in the TPM. - */ -static int tpm_key_sign(struct tpm_key *tk, - struct kernel_pkey_params *params, - const void *in, void *out) -{ - struct tpm_buf tb; - uint32_t keyhandle; - uint8_t srkauth[SHA1_DIGEST_SIZE]; - uint8_t keyauth[SHA1_DIGEST_SIZE]; - void *asn1_wrapped = NULL; - uint32_t in_len = params->in_len; - int r; - - pr_devel("==>%s()\n", __func__); - - if (strcmp(params->encoding, "pkcs1")) - return -ENOPKG; - - if (params->hash_algo) { - const struct asn1_template *asn1 = - lookup_asn1(params->hash_algo); - - if (!asn1) - return -ENOPKG; - - /* request enough space for the ASN.1 template + input hash */ - asn1_wrapped = kzalloc(in_len + asn1->size, GFP_KERNEL); - if (!asn1_wrapped) - return -ENOMEM; - - /* Copy ASN.1 template, then the input */ - memcpy(asn1_wrapped, asn1->data, asn1->size); - memcpy(asn1_wrapped + asn1->size, in, in_len); - - in = asn1_wrapped; - in_len += asn1->size; - } - - if (in_len > tk->key_len / 8 - 11) { - r = -EOVERFLOW; - goto error_free_asn1_wrapped; - } - - r = tpm_buf_init(&tb, 0, 0); - if (r) - goto error_free_asn1_wrapped; - - /* TODO: Handle a non-all zero SRK authorization */ - memset(srkauth, 0, sizeof(srkauth)); - - r = tpm_loadkey2(&tb, SRKHANDLE, srkauth, - tk->blob, tk->blob_len, &keyhandle); - if (r < 0) { - pr_devel("loadkey2 failed (%d)\n", r); - goto error_free_tb; - } - - /* TODO: Handle a non-all zero key authorization */ - memset(keyauth, 0, sizeof(keyauth)); - - r = tpm_sign(&tb, keyhandle, keyauth, in, in_len, out, params->out_len); - if (r < 0) - pr_devel("tpm_sign failed (%d)\n", r); - - if (tpm_flushspecific(&tb, keyhandle) < 0) - pr_devel("flushspecific failed (%d)\n", r); - -error_free_tb: - tpm_buf_destroy(&tb); -error_free_asn1_wrapped: - kfree(asn1_wrapped); - pr_devel("<==%s() = %d\n", __func__, r); - return r; -} - -/* - * Do encryption, decryption and signing ops. 
- */ -static int tpm_key_eds_op(struct kernel_pkey_params *params, - const void *in, void *out) -{ - struct tpm_key *tk = params->key->payload.data[asym_crypto]; - int ret = -EOPNOTSUPP; - - /* Perform the encryption calculation. */ - switch (params->op) { - case kernel_pkey_encrypt: - ret = tpm_key_encrypt(tk, params, in, out); - break; - case kernel_pkey_decrypt: - ret = tpm_key_decrypt(tk, params, in, out); - break; - case kernel_pkey_sign: - ret = tpm_key_sign(tk, params, in, out); - break; - default: - BUG(); - } - - return ret; -} - -/* - * Verify a signature using a public key. - */ -static int tpm_key_verify_signature(const struct key *key, - const struct public_key_signature *sig) -{ - const struct tpm_key *tk = key->payload.data[asym_crypto]; - struct crypto_wait cwait; - struct crypto_akcipher *tfm; - struct akcipher_request *req; - struct scatterlist src_sg[2]; - char alg_name[CRYPTO_MAX_ALG_NAME]; - uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; - uint32_t der_pub_key_len; - int ret; - - pr_devel("==>%s()\n", __func__); - - BUG_ON(!tk); - BUG_ON(!sig); - BUG_ON(!sig->s); - - if (!sig->digest) - return -ENOPKG; - - ret = determine_akcipher(sig->encoding, sig->hash_algo, alg_name); - if (ret < 0) - return ret; - - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, - der_pub_key); - - ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); - if (ret < 0) - goto error_free_tfm; - - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - - sg_init_table(src_sg, 2); - sg_set_buf(&src_sg[0], sig->s, sig->s_size); - sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); - akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, - sig->digest_size); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); - ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); - - akcipher_request_free(req); -error_free_tfm: - crypto_free_akcipher(tfm); - pr_devel("<==%s() = %d\n", __func__, ret); - if (WARN_ON_ONCE(ret > 0)) - ret = -EINVAL; - return ret; -} - -/* - * Parse enough information out of TPM_KEY structure: - * TPM_STRUCT_VER -> 4 bytes - * TPM_KEY_USAGE -> 2 bytes - * TPM_KEY_FLAGS -> 4 bytes - * TPM_AUTH_DATA_USAGE -> 1 byte - * TPM_KEY_PARMS -> variable - * UINT32 PCRInfoSize -> 4 bytes - * BYTE* -> PCRInfoSize bytes - * TPM_STORE_PUBKEY - * UINT32 encDataSize; - * BYTE* -> encDataSize; - * - * TPM_KEY_PARMS: - * TPM_ALGORITHM_ID -> 4 bytes - * TPM_ENC_SCHEME -> 2 bytes - * TPM_SIG_SCHEME -> 2 bytes - * UINT32 parmSize -> 4 bytes - * BYTE* -> variable - */ -static int extract_key_parameters(struct tpm_key *tk) -{ - const void *cur = tk->blob; - uint32_t len = tk->blob_len; - const void *pub_key; - uint32_t sz; - uint32_t key_len; - - if (len < 11) - return -EBADMSG; - - /* Ensure this is a legacy key */ - if (get_unaligned_be16(cur + 4) != 0x0015) - return -EBADMSG; - - /* Skip to TPM_KEY_PARMS */ - cur += 11; - len -= 11; - - if (len < 12) - return -EBADMSG; - - /* Make sure this is an RSA key */ - if (get_unaligned_be32(cur) != 0x00000001) - return -EBADMSG; - - /* Make sure this is TPM_ES_RSAESPKCSv15 encoding scheme */ - if (get_unaligned_be16(cur + 4) != 0x0002) - return -EBADMSG; - - /* Make sure this is TPM_SS_RSASSAPKCS1v15_DER signature scheme */ - if (get_unaligned_be16(cur + 6) != 0x0003) - return -EBADMSG; - - sz = 
get_unaligned_be32(cur + 8); - if (len < sz + 12) - return -EBADMSG; - - /* Move to TPM_RSA_KEY_PARMS */ - len -= 12; - cur += 12; - - /* Grab the RSA key length */ - key_len = get_unaligned_be32(cur); - - switch (key_len) { - case 512: - case 1024: - case 1536: - case 2048: - break; - default: - return -EINVAL; - } - - /* Move just past TPM_KEY_PARMS */ - cur += sz; - len -= sz; - - if (len < 4) - return -EBADMSG; - - sz = get_unaligned_be32(cur); - if (len < 4 + sz) - return -EBADMSG; - - /* Move to TPM_STORE_PUBKEY */ - cur += 4 + sz; - len -= 4 + sz; - - /* Grab the size of the public key, it should jive with the key size */ - sz = get_unaligned_be32(cur); - if (sz > 256) - return -EINVAL; - - pub_key = cur + 4; - - tk->key_len = key_len; - tk->pub_key = pub_key; - tk->pub_key_len = sz; - - return 0; -} - -/* Given the blob, parse it and load it into the TPM */ -struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len) -{ - int r; - struct tpm_key *tk; - - r = tpm_is_tpm2(NULL); - if (r < 0) - goto error; - - /* We don't support TPM2 yet */ - if (r > 0) { - r = -ENODEV; - goto error; - } - - r = -ENOMEM; - tk = kzalloc(sizeof(struct tpm_key), GFP_KERNEL); - if (!tk) - goto error; - - tk->blob = kmemdup(blob, blob_len, GFP_KERNEL); - if (!tk->blob) - goto error_memdup; - - tk->blob_len = blob_len; - - r = extract_key_parameters(tk); - if (r < 0) - goto error_extract; - - return tk; - -error_extract: - kfree(tk->blob); - tk->blob_len = 0; -error_memdup: - kfree(tk); -error: - return ERR_PTR(r); -} -EXPORT_SYMBOL_GPL(tpm_key_create); - -/* - * TPM-based asymmetric key subtype - */ -struct asymmetric_key_subtype asym_tpm_subtype = { - .owner = THIS_MODULE, - .name = "asym_tpm", - .name_len = sizeof("asym_tpm") - 1, - .describe = asym_tpm_describe, - .destroy = asym_tpm_destroy, - .query = tpm_key_query, - .eds_op = tpm_key_eds_op, - .verify_signature = tpm_key_verify_signature, -}; -EXPORT_SYMBOL_GPL(asym_tpm_subtype); - -MODULE_DESCRIPTION("TPM based asymmetric key subtype"); -MODULE_AUTHOR("Intel Corporation"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index ad8af3d70ac0..348966ea2175 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -11,23 +11,13 @@ #include <crypto/public_key.h> #include <linux/seq_file.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/slab.h> #include <linux/ctype.h> #include <keys/system_keyring.h> #include <keys/user-type.h> #include "asymmetric_keys.h" -MODULE_LICENSE("GPL"); - -const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = { - [VERIFYING_MODULE_SIGNATURE] = "mod sig", - [VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig", - [VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig", - [VERIFYING_KEY_SIGNATURE] = "key sig", - [VERIFYING_KEY_SELF_SIGNATURE] = "key self sig", - [VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig", -}; -EXPORT_SYMBOL_GPL(key_being_used_for); static LIST_HEAD(asymmetric_key_parsers); static DECLARE_RWSEM(asymmetric_key_parsers_sem); @@ -36,16 +26,23 @@ static DECLARE_RWSEM(asymmetric_key_parsers_sem); * find_asymmetric_key - Find a key by ID. * @keyring: The keys to search. * @id_0: The first ID to look for or NULL. - * @id_1: The second ID to look for or NULL. - * @partial: Use partial match if true, exact if false. + * @id_1: The second ID to look for or NULL, matched together with @id_0 + * against @keyring keys' id[0] and id[1]. 
+ * @id_2: The fallback ID to match against @keyring keys' id[2] if both of the + * other IDs are NULL. + * @partial: Use partial match for @id_0 and @id_1 if true, exact if false. * * Find a key in the given keyring by identifier. The preferred identifier is * the id_0 and the fallback identifier is the id_1. If both are given, the - * lookup is by the former, but the latter must also match. + * former is matched (exactly or partially) against either of the sought key's + * identifiers and the latter must match the found key's second identifier + * exactly. If both are missing, id_2 must match the sought key's third + * identifier exactly. */ struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, + const struct asymmetric_key_id *id_2, bool partial) { struct key *key; @@ -54,14 +51,18 @@ struct key *find_asymmetric_key(struct key *keyring, char *req, *p; int len; - BUG_ON(!id_0 && !id_1); - if (id_0) { lookup = id_0->data; len = id_0->len; - } else { + } else if (id_1) { lookup = id_1->data; len = id_1->len; + } else if (id_2) { + lookup = id_2->data; + len = id_2->len; + } else { + WARN_ON(1); + return ERR_PTR(-EINVAL); } /* Construct an identifier "id:<keyid>". */ @@ -69,7 +70,10 @@ struct key *find_asymmetric_key(struct key *keyring, if (!req) return ERR_PTR(-ENOMEM); - if (partial) { + if (!id_0 && !id_1) { + *p++ = 'd'; + *p++ = 'n'; + } else if (partial) { *p++ = 'i'; *p++ = 'd'; } else { @@ -138,12 +142,17 @@ struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1, size_t len_2) { struct asymmetric_key_id *kid; - - kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2, - GFP_KERNEL); + size_t kid_sz; + size_t len; + + if (check_add_overflow(len_1, len_2, &len)) + return ERR_PTR(-EOVERFLOW); + if (check_add_overflow(sizeof(struct asymmetric_key_id), len, &kid_sz)) + return ERR_PTR(-EOVERFLOW); + kid = kmalloc(kid_sz, GFP_KERNEL); if (!kid) return ERR_PTR(-ENOMEM); - kid->len = len_1 + len_2; + kid->len = len; memcpy(kid->data, val_1, len_1); memcpy(kid->data + len_1, val_2, len_2); return kid; @@ -185,8 +194,8 @@ bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1, EXPORT_SYMBOL_GPL(asymmetric_key_id_partial); /** - * asymmetric_match_key_ids - Search asymmetric key IDs - * @kids: The list of key IDs to check + * asymmetric_match_key_ids - Search asymmetric key IDs 1 & 2 + * @kids: The pair of key IDs to check * @match_id: The key ID we're looking for * @match: The match function to use */ @@ -200,7 +209,7 @@ static bool asymmetric_match_key_ids( if (!kids || !match_id) return false; - for (i = 0; i < ARRAY_SIZE(kids->id); i++) + for (i = 0; i < 2; i++) if (match(kids->id[i], match_id)) return true; return false; @@ -244,7 +253,7 @@ struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id) } /* - * Match asymmetric keys by an exact match on an ID. + * Match asymmetric keys by an exact match on one of the first two IDs. */ static bool asymmetric_key_cmp(const struct key *key, const struct key_match_data *match_data) @@ -257,7 +266,7 @@ static bool asymmetric_key_cmp(const struct key *key, } /* - * Match asymmetric keys by a partial match on an IDs. + * Match asymmetric keys by a partial match on one of the first two IDs. 
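Stepping back to the asymmetric_key_generate_id() hunk above: it replaces an unchecked size sum with check_add_overflow(), which returns true when the addition wraps. The pattern in isolation, with a hypothetical allocator:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Allocate hdr + len_1 + len_2 bytes, refusing any wrapped sum. */
static void *alloc_two_part_blob(size_t hdr, size_t len_1, size_t len_2)
{
        size_t len, total;

        if (check_add_overflow(len_1, len_2, &len) ||
            check_add_overflow(hdr, len, &total))
                return NULL;    /* caller reports -EOVERFLOW */

        return kmalloc(total, GFP_KERNEL);
}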
*/ static bool asymmetric_key_cmp_partial(const struct key *key, const struct key_match_data *match_data) @@ -270,14 +279,27 @@ static bool asymmetric_key_cmp_partial(const struct key *key, } /* + * Match asymmetric keys by an exact match on the third IDs. + */ +static bool asymmetric_key_cmp_name(const struct key *key, + const struct key_match_data *match_data) +{ + const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); + const struct asymmetric_key_id *match_id = match_data->preparsed; + + return kids && asymmetric_key_id_same(kids->id[2], match_id); +} + +/* * Preparse the match criterion. If we don't set lookup_type and cmp, * the default will be an exact match on the key description. * * There are some specifiers for matching key IDs rather than by the key * description: * - * "id:<id>" - find a key by partial match on any available ID - * "ex:<id>" - find a key by exact match on any available ID + * "id:<id>" - find a key by partial match on one of the first two IDs + * "ex:<id>" - find a key by exact match on one of the first two IDs + * "dn:<id>" - find a key by exact match on the third ID * * These have to be searched by iteration rather than by direct lookup because * the key is hashed according to its description. @@ -301,6 +323,11 @@ static int asymmetric_key_match_preparse(struct key_match_data *match_data) spec[1] == 'x' && spec[2] == ':') { id = spec + 3; + } else if (spec[0] == 'd' && + spec[1] == 'n' && + spec[2] == ':') { + id = spec + 3; + cmp = asymmetric_key_cmp_name; } else { goto default_match; } diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 839591ad21ac..8aecbe4637f3 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,12 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_md4: - ctx->digest_algo = "md4"; - break; - case OID_md5: - ctx->digest_algo = "md5"; - break; case OID_sha1: ctx->digest_algo = "sha1"; break; @@ -93,8 +87,14 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, case OID_sha512: ctx->digest_algo = "sha512"; break; - case OID_sha224: - ctx->digest_algo = "sha224"; + case OID_sha3_256: + ctx->digest_algo = "sha3-256"; + break; + case OID_sha3_384: + ctx->digest_algo = "sha3-384"; + break; + case OID_sha3_512: + ctx->digest_algo = "sha3-512"; break; case OID__NR: diff --git a/crypto/asymmetric_keys/pkcs7.asn1 b/crypto/asymmetric_keys/pkcs7.asn1 index 1eca740b816a..28e1f4a41c14 100644 --- a/crypto/asymmetric_keys/pkcs7.asn1 +++ b/crypto/asymmetric_keys/pkcs7.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2009 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5652#section-3 + PKCS7ContentInfo ::= SEQUENCE { contentType ContentType ({ pkcs7_check_content_type }), content [0] EXPLICIT SignedData OPTIONAL diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 967329e0a07b..423d13c47545 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,12 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_md4: - ctx->sinfo->sig->hash_algo = "md4"; - break; - case OID_md5: - ctx->sinfo->sig->hash_algo = "md5"; - break; case OID_sha1: ctx->sinfo->sig->hash_algo = "sha1"; break; @@ -248,6 +242,24 @@ 
int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, case OID_sha224: ctx->sinfo->sig->hash_algo = "sha224"; break; + case OID_sm3: + ctx->sinfo->sig->hash_algo = "sm3"; + break; + case OID_gost2012Digest256: + ctx->sinfo->sig->hash_algo = "streebog256"; + break; + case OID_gost2012Digest512: + ctx->sinfo->sig->hash_algo = "streebog512"; + break; + case OID_sha3_256: + ctx->sinfo->sig->hash_algo = "sha3-256"; + break; + case OID_sha3_384: + ctx->sinfo->sig->hash_algo = "sha3-384"; + break; + case OID_sha3_512: + ctx->sinfo->sig->hash_algo = "sha3-512"; + break; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; @@ -269,6 +281,22 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; + case OID_id_ecdsa_with_sha1: + case OID_id_ecdsa_with_sha224: + case OID_id_ecdsa_with_sha256: + case OID_id_ecdsa_with_sha384: + case OID_id_ecdsa_with_sha512: + case OID_id_ecdsa_with_sha3_256: + case OID_id_ecdsa_with_sha3_384: + case OID_id_ecdsa_with_sha3_512: + ctx->sinfo->sig->pkey_algo = "ecdsa"; + ctx->sinfo->sig->encoding = "x962"; + break; + case OID_gost2012PKey256: + case OID_gost2012PKey512: + ctx->sinfo->sig->pkey_algo = "ecrdsa"; + ctx->sinfo->sig->encoding = "raw"; + break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); return -ENOPKG; diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c index b531df2013c4..9a87c34ed173 100644 --- a/crypto/asymmetric_keys/pkcs7_trust.c +++ b/crypto/asymmetric_keys/pkcs7_trust.c @@ -48,7 +48,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, * keys. */ key = find_asymmetric_key(trust_keyring, - x509->id, x509->skid, false); + x509->id, x509->skid, NULL, false); if (!IS_ERR(key)) { /* One of the X.509 certificates in the PKCS#7 message * is apparently the same as one we already trust. @@ -82,7 +82,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, key = find_asymmetric_key(trust_keyring, last->sig->auth_ids[0], last->sig->auth_ids[1], - false); + NULL, false); if (!IS_ERR(key)) { x509 = last; pr_devel("sinfo %u: Root cert %u signer is key %x\n", @@ -97,7 +97,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, * the signed info directly. 
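As the pkcs7_trust.c hunks around here show, every find_asymmetric_key() caller in this series gains a fourth argument for the new subject-DN-based ID, passing NULL where there is nothing to match against it. A hypothetical wrapper spelling out the full convention (assumes sig->auth_ids[] now carries three entries):

#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>

static struct key *lookup_signer(struct key *trust_keyring,
                                 const struct public_key_signature *sig)
{
        return find_asymmetric_key(trust_keyring,
                                   sig->auth_ids[0],    /* issuer + serial */
                                   sig->auth_ids[1],    /* subject key ID */
                                   sig->auth_ids[2],    /* subject DN */
                                   false);              /* exact match */
}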
*/ key = find_asymmetric_key(trust_keyring, - sinfo->sig->auth_ids[0], NULL, false); + sinfo->sig->auth_ids[0], NULL, NULL, false); if (!IS_ERR(key)) { pr_devel("sinfo %u: Direct signer is key %x\n", sinfo->index, key_serial(key)); diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c index 0b4d07aa8811..6d6475e3a9bf 100644 --- a/crypto/asymmetric_keys/pkcs7_verify.c +++ b/crypto/asymmetric_keys/pkcs7_verify.c @@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7, } if (sinfo->msgdigest_len != sig->digest_size) { - pr_debug("Sig %u: Invalid digest size (%u)\n", - sinfo->index, sinfo->msgdigest_len); + pr_warn("Sig %u: Invalid digest size (%u)\n", + sinfo->index, sinfo->msgdigest_len); ret = -EBADMSG; goto error; } if (memcmp(sig->digest, sinfo->msgdigest, sinfo->msgdigest_len) != 0) { - pr_debug("Sig %u: Message digest doesn't match\n", - sinfo->index); + pr_warn("Sig %u: Message digest doesn't match\n", + sinfo->index); ret = -EKEYREJECTED; goto error; } @@ -174,12 +174,6 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7, pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); - if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { - pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", - sinfo->index); - continue; - } - sinfo->signer = x509; return 0; } @@ -226,9 +220,6 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, return 0; } - if (x509->unsupported_key) - goto unsupported_crypto_in_x509; - pr_debug("- issuer %s\n", x509->issuer); sig = x509->sig; if (sig->auth_ids[0]) @@ -245,7 +236,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, * authority. */ if (x509->unsupported_sig) - goto unsupported_crypto_in_x509; + goto unsupported_sig_in_x509; x509->signer = x509; pr_debug("- self-signed\n"); return 0; @@ -309,7 +300,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, might_sleep(); } -unsupported_crypto_in_x509: +unsupported_sig_in_x509: /* Just prune the certificate chain at this point if we lack some * crypto module to go further. 
Note, however, we don't want to set * sinfo->unsupported_crypto as the signed info block may still be @@ -438,6 +429,7 @@ int pkcs7_verify(struct pkcs7_message *pkcs7, /* Authattr presence checked in parser */ break; case VERIFYING_UNSPECIFIED_SIGNATURE: + case VERIFYING_BPF_SIGNATURE: if (pkcs7->data_type != OID_data) { pr_warn("Invalid unspecified sig (not pkcs7-data)\n"); return -EKEYREJECTED; @@ -487,10 +479,11 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, const void *data, size_t datalen) { if (pkcs7->data) { - pr_debug("Data already supplied\n"); + pr_warn("Data already supplied\n"); return -EINVAL; } pkcs7->data = data; pkcs7->data_len = datalen; return 0; } +EXPORT_SYMBOL_GPL(pkcs7_supply_detached_data); diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1 index 702c41a3c713..a2a8af2633d8 100644 --- a/crypto/asymmetric_keys/pkcs8.asn1 +++ b/crypto/asymmetric_keys/pkcs8.asn1 @@ -1,3 +1,9 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2010 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5958#section-2 -- -- This is the unencrypted variant -- diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 4fefb219bfdc..e5b177c8e842 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -8,18 +8,17 @@ */ #define pr_fmt(fmt) "PKEY: "fmt -#include <linux/module.h> -#include <linux/export.h> +#include <crypto/akcipher.h> +#include <crypto/public_key.h> +#include <crypto/sig.h> +#include <keys/asymmetric-subtype.h> +#include <linux/asn1.h> +#include <linux/err.h> #include <linux/kernel.h> -#include <linux/slab.h> +#include <linux/module.h> #include <linux/seq_file.h> -#include <linux/scatterlist.h> -#include <linux/asn1.h> -#include <keys/asymmetric-subtype.h> -#include <crypto/public_key.h> -#include <crypto/akcipher.h> -#include <crypto/sm2.h> -#include <crypto/sm3_base.h> +#include <linux/slab.h> +#include <linux/string.h> MODULE_DESCRIPTION("In-software asymmetric public-key subtype"); MODULE_AUTHOR("Red Hat, Inc."); @@ -43,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key, void public_key_free(struct public_key *key) { if (key) { - kfree(key->key); + kfree_sensitive(key->key); kfree(key->params); kfree(key); } @@ -60,39 +59,96 @@ static void public_key_destroy(void *payload0, void *payload3) } /* - * Determine the crypto algorithm name. + * Given a public_key, and an encoding and hash_algo to be used for signing + * and/or verification with that key, determine the name of the corresponding + * akcipher algorithm. Also check that encoding and hash_algo are allowed. */ -static -int software_key_determine_akcipher(const char *encoding, - const char *hash_algo, - const struct public_key *pkey, - char alg_name[CRYPTO_MAX_ALG_NAME]) +static int +software_key_determine_akcipher(const struct public_key *pkey, + const char *encoding, const char *hash_algo, + char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig, + enum kernel_pkey_operation op) { int n; - if (strcmp(encoding, "pkcs1") == 0) { - /* The data wangled by the RSA algorithm is typically padded - * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447 - * sec 8.2]. + *sig = true; + + if (!encoding) + return -EINVAL; + + if (strcmp(pkey->pkey_algo, "rsa") == 0) { + /* + * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2]. 
+ */ + if (strcmp(encoding, "pkcs1") == 0) { + *sig = op == kernel_pkey_sign || + op == kernel_pkey_verify; + if (!*sig) { + /* + * For encrypt/decrypt, hash_algo is not used + * but allowed to be set for historic reasons. + */ + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s)", + pkey->pkey_algo); + } else { + if (!hash_algo) + hash_algo = "none"; + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, + "pkcs1(%s,%s)", + pkey->pkey_algo, hash_algo); + } + return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; + } + if (strcmp(encoding, "raw") != 0) + return -EINVAL; + /* + * Raw RSA cannot differentiate between different hash + * algorithms. + */ + if (hash_algo) + return -EINVAL; + *sig = false; + } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { + if (strcmp(encoding, "x962") != 0 && + strcmp(encoding, "p1363") != 0) + return -EINVAL; + /* + * ECDSA signatures are taken over a raw hash, so they don't + * differentiate between different hash algorithms. That means + * that the verifier should hard-code a specific hash algorithm. + * Unfortunately, in practice ECDSA is used with multiple SHAs, + * so we have to allow all of them and not just one. */ if (!hash_algo) - n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s)", - pkey->pkey_algo); - else - n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", - pkey->pkey_algo, hash_algo); + return -EINVAL; + if (strcmp(hash_algo, "sha1") != 0 && + strcmp(hash_algo, "sha224") != 0 && + strcmp(hash_algo, "sha256") != 0 && + strcmp(hash_algo, "sha384") != 0 && + strcmp(hash_algo, "sha512") != 0 && + strcmp(hash_algo, "sha3-256") != 0 && + strcmp(hash_algo, "sha3-384") != 0 && + strcmp(hash_algo, "sha3-512") != 0) + return -EINVAL; + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", + encoding, pkey->pkey_algo); return n >= CRYPTO_MAX_ALG_NAME ? 
-EINVAL : 0; + } else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) { + if (strcmp(encoding, "raw") != 0) + return -EINVAL; + if (!hash_algo) + return -EINVAL; + if (strcmp(hash_algo, "streebog256") != 0 && + strcmp(hash_algo, "streebog512") != 0) + return -EINVAL; + } else { + /* Unknown public key algorithm */ + return -ENOPKG; } - - if (strcmp(encoding, "raw") == 0 || - strcmp(encoding, "x962") == 0) { - strcpy(alg_name, pkey->pkey_algo); - return 0; - } - - return -ENOPKG; + if (strscpy(alg_name, pkey->pkey_algo, CRYPTO_MAX_ALG_NAME) < 0) + return -EINVAL; + return 0; } static u8 *pkey_pack_u32(u8 *dst, u32 val) @@ -107,57 +163,100 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val) static int software_key_query(const struct kernel_pkey_params *params, struct kernel_pkey_query *info) { - struct crypto_akcipher *tfm; struct public_key *pkey = params->key->payload.data[asym_crypto]; char alg_name[CRYPTO_MAX_ALG_NAME]; u8 *key, *ptr; int ret, len; + bool issig; - ret = software_key_determine_akcipher(params->encoding, - params->hash_algo, - pkey, alg_name); + ret = software_key_determine_akcipher(pkey, params->encoding, + params->hash_algo, alg_name, + &issig, kernel_pkey_sign); if (ret < 0) return ret; - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) - goto error_free_tfm; + return -ENOMEM; + memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); - if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); - else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); - if (ret < 0) - goto error_free_key; + memset(info, 0, sizeof(*info)); - len = crypto_akcipher_maxsize(tfm); - info->key_size = len * 8; - info->max_data_size = len; - info->max_sig_size = len; - info->max_enc_size = len; - info->max_dec_size = len; - info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT | - KEYCTL_SUPPORTS_VERIFY); - if (pkey->key_is_private) - info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT | - KEYCTL_SUPPORTS_SIGN); - ret = 0; + if (issig) { + struct crypto_sig *sig; + + sig = crypto_alloc_sig(alg_name, 0, 0); + if (IS_ERR(sig)) { + ret = PTR_ERR(sig); + goto error_free_key; + } + + if (pkey->key_is_private) + ret = crypto_sig_set_privkey(sig, key, pkey->keylen); + else + ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); + if (ret < 0) + goto error_free_sig; + + len = crypto_sig_keysize(sig); + info->key_size = len; + info->max_sig_size = crypto_sig_maxsize(sig); + info->max_data_size = crypto_sig_digestsize(sig); + + info->supported_ops = KEYCTL_SUPPORTS_VERIFY; + if (pkey->key_is_private) + info->supported_ops |= KEYCTL_SUPPORTS_SIGN; + + if (strcmp(params->encoding, "pkcs1") == 0) { + info->max_enc_size = len / BITS_PER_BYTE; + info->max_dec_size = len / BITS_PER_BYTE; + + info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT; + if (pkey->key_is_private) + info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; + } + +error_free_sig: + crypto_free_sig(sig); + } else { + struct crypto_akcipher *tfm; + + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + goto error_free_key; + } + + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + if (ret < 0) + goto error_free_akcipher; + + len 
= crypto_akcipher_maxsize(tfm); + info->key_size = len * BITS_PER_BYTE; + info->max_sig_size = len; + info->max_data_size = len; + info->max_enc_size = len; + info->max_dec_size = len; + + info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT; + if (pkey->key_is_private) + info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; + +error_free_akcipher: + crypto_free_akcipher(tfm); + } error_free_key: - kfree(key); -error_free_tfm: - crypto_free_akcipher(tfm); + kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } @@ -169,35 +268,25 @@ static int software_key_eds_op(struct kernel_pkey_params *params, const void *in, void *out) { const struct public_key *pkey = params->key->payload.data[asym_crypto]; - struct akcipher_request *req; - struct crypto_akcipher *tfm; - struct crypto_wait cwait; - struct scatterlist in_sg, out_sg; char alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_akcipher *tfm; + struct crypto_sig *sig; char *key, *ptr; + bool issig; int ret; pr_devel("==>%s()\n", __func__); - ret = software_key_determine_akcipher(params->encoding, - params->hash_algo, - pkey, alg_name); + ret = software_key_determine_akcipher(pkey, params->encoding, + params->hash_algo, alg_name, + &issig, params->op); if (ret < 0) return ret; - tfm = crypto_alloc_akcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) - goto error_free_req; + return -ENOMEM; memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; @@ -205,118 +294,84 @@ static int software_key_eds_op(struct kernel_pkey_params *params, ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); - if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); - else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); - if (ret) - goto error_free_key; + if (issig) { + sig = crypto_alloc_sig(alg_name, 0, 0); + if (IS_ERR(sig)) { + ret = PTR_ERR(sig); + goto error_free_key; + } - sg_init_one(&in_sg, in, params->in_len); - sg_init_one(&out_sg, out, params->out_len); - akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, - params->out_len); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); + if (pkey->key_is_private) + ret = crypto_sig_set_privkey(sig, key, pkey->keylen); + else + ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); + if (ret) + goto error_free_tfm; + } else { + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + goto error_free_key; + } + + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + if (ret) + goto error_free_tfm; + } + + ret = -EINVAL; /* Perform the encryption calculation. 
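The switch that follows uses the synchronous akcipher helpers, which replace the scatterlist/request/crypto_wait boilerplate deleted above. Reduced to a minimal sketch (hypothetical wrapper; error handling trimmed):

#include <crypto/akcipher.h>

static int sync_encrypt_example(const char *alg, const u8 *key, u32 keylen,
                                const void *in, unsigned int in_len,
                                void *out, unsigned int out_len)
{
        struct crypto_akcipher *tfm;
        int ret;

        tfm = crypto_alloc_akcipher(alg, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_akcipher_set_pub_key(tfm, key, keylen);
        if (!ret)
                ret = crypto_akcipher_sync_encrypt(tfm, in, in_len,
                                                   out, out_len);
        crypto_free_akcipher(tfm);
        return ret;
}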
*/ switch (params->op) { case kernel_pkey_encrypt: - ret = crypto_akcipher_encrypt(req); + if (issig) + break; + ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len, + out, params->out_len); break; case kernel_pkey_decrypt: - ret = crypto_akcipher_decrypt(req); + if (issig) + break; + ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len, + out, params->out_len); break; case kernel_pkey_sign: - ret = crypto_akcipher_sign(req); + if (!issig) + break; + ret = crypto_sig_sign(sig, in, params->in_len, + out, params->out_len); break; default: BUG(); } - ret = crypto_wait_req(ret, &cwait); - if (ret == 0) - ret = req->dst_len; + if (!issig && ret == 0) + ret = crypto_akcipher_maxsize(tfm); -error_free_key: - kfree(key); -error_free_req: - akcipher_request_free(req); error_free_tfm: - crypto_free_akcipher(tfm); + if (issig) + crypto_free_sig(sig); + else + crypto_free_akcipher(tfm); +error_free_key: + kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } -#if IS_REACHABLE(CONFIG_CRYPTO_SM2) -static int cert_sig_digest_update(const struct public_key_signature *sig, - struct crypto_akcipher *tfm_pkey) -{ - struct crypto_shash *tfm; - struct shash_desc *desc; - size_t desc_size; - unsigned char dgst[SM3_DIGEST_SIZE]; - int ret; - - BUG_ON(!sig->data); - - ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID, - SM2_DEFAULT_USERID_LEN, dgst); - if (ret) - return ret; - - tfm = crypto_alloc_shash(sig->hash_algo, 0, 0); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); - desc = kzalloc(desc_size, GFP_KERNEL); - if (!desc) { - ret = -ENOMEM; - goto error_free_tfm; - } - - desc->tfm = tfm; - - ret = crypto_shash_init(desc); - if (ret < 0) - goto error_free_desc; - - ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE); - if (ret < 0) - goto error_free_desc; - - ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest); - -error_free_desc: - kfree(desc); -error_free_tfm: - crypto_free_shash(tfm); - return ret; -} -#else -static inline int cert_sig_digest_update( - const struct public_key_signature *sig, - struct crypto_akcipher *tfm_pkey) -{ - return -ENOTSUPP; -} -#endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */ - /* * Verify a signature using a public key. */ int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig) { - struct crypto_wait cwait; - struct crypto_akcipher *tfm; - struct akcipher_request *req; - struct scatterlist src_sg[2]; char alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_sig *tfm; char *key, *ptr; + bool issig; int ret; pr_devel("==>%s()\n", __func__); @@ -325,25 +380,37 @@ int public_key_verify_signature(const struct public_key *pkey, BUG_ON(!sig); BUG_ON(!sig->s); - ret = software_key_determine_akcipher(sig->encoding, - sig->hash_algo, - pkey, alg_name); + /* + * If the signature specifies a public key algorithm, it *must* match + * the key's actual public key algorithm. + * + * Small exception: ECDSA signatures don't specify the curve, but ECDSA + * keys do. So the strings can mismatch slightly in that case: + * "ecdsa-nist-*" for the key, but "ecdsa" for the signature. 
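The signature path goes through the new crypto_sig API instead, which consumes plain buffers directly with no scatterlists or completion waits. The equivalent verification sketch (hypothetical wrapper):

#include <crypto/sig.h>

static int sig_verify_example(const char *alg, const u8 *key, u32 keylen,
                              const void *s, unsigned int s_size,
                              const void *digest, unsigned int digest_size)
{
        struct crypto_sig *tfm;
        int ret;

        tfm = crypto_alloc_sig(alg, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_sig_set_pubkey(tfm, key, keylen);
        if (!ret)
                ret = crypto_sig_verify(tfm, s, s_size,
                                        digest, digest_size);
        crypto_free_sig(tfm);
        return ret;
}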
+ */ + if (sig->pkey_algo) { + if (strcmp(pkey->pkey_algo, sig->pkey_algo) != 0 && + (strncmp(pkey->pkey_algo, "ecdsa-", 6) != 0 || + strcmp(sig->pkey_algo, "ecdsa") != 0)) + return -EKEYREJECTED; + } + + ret = software_key_determine_akcipher(pkey, sig->encoding, + sig->hash_algo, alg_name, + &issig, kernel_pkey_verify); if (ret < 0) return ret; - tfm = crypto_alloc_akcipher(alg_name, 0, 0); + tfm = crypto_alloc_sig(alg_name, 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); - ret = -ENOMEM; - req = akcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) - goto error_free_tfm; - key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); - if (!key) - goto error_free_req; + if (!key) { + ret = -ENOMEM; + goto error_free_tfm; + } memcpy(key, pkey->key, pkey->keylen); ptr = key + pkey->keylen; @@ -352,36 +419,19 @@ int public_key_verify_signature(const struct public_key *pkey, memcpy(ptr, pkey->params, pkey->paramlen); if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + ret = crypto_sig_set_privkey(tfm, key, pkey->keylen); else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen); if (ret) goto error_free_key; - if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 && - sig->data_size) { - ret = cert_sig_digest_update(sig, tfm); - if (ret) - goto error_free_key; - } - - sg_init_table(src_sg, 2); - sg_set_buf(&src_sg[0], sig->s, sig->s_size); - sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); - akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, - sig->digest_size); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, - crypto_req_done, &cwait); - ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); + ret = crypto_sig_verify(tfm, sig->s, sig->s_size, + sig->digest, sig->digest_size); error_free_key: - kfree(key); -error_free_req: - akcipher_request_free(req); + kfree_sensitive(key); error_free_tfm: - crypto_free_akcipher(tfm); + crypto_free_sig(tfm); pr_devel("<==%s() = %d\n", __func__, ret); if (WARN_ON_ONCE(ret > 0)) ret = -EINVAL; diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c index 84cefe3b3585..86292965f493 100644 --- a/crypto/asymmetric_keys/restrict.c +++ b/crypto/asymmetric_keys/restrict.c @@ -17,9 +17,12 @@ static struct asymmetric_key_id *ca_keyid; #ifndef MODULE static struct { - struct asymmetric_key_id id; - unsigned char data[10]; + /* Must be last as it ends in a flexible-array member. */ + TRAILING_OVERLAP(struct asymmetric_key_id, id, data, + unsigned char data[10]; + ); } cakey; +static_assert(offsetof(typeof(cakey), id.data) == offsetof(typeof(cakey), data)); static int __init ca_keys_setup(char *str) { @@ -87,7 +90,7 @@ int restrict_link_by_signature(struct key *dest_keyring, sig = payload->data[asym_auth]; if (!sig) return -ENOPKG; - if (!sig->auth_ids[0] && !sig->auth_ids[1]) + if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2]) return -ENOKEY; if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid)) @@ -96,23 +99,111 @@ int restrict_link_by_signature(struct key *dest_keyring, /* See if we have a key that signed this one. 
*/ key = find_asymmetric_key(trust_keyring, sig->auth_ids[0], sig->auth_ids[1], - false); + sig->auth_ids[2], false); if (IS_ERR(key)) return -ENOKEY; if (use_builtin_keys && !test_bit(KEY_FLAG_BUILTIN, &key->flags)) ret = -ENOKEY; + else if (IS_BUILTIN(CONFIG_SECONDARY_TRUSTED_KEYRING_SIGNED_BY_BUILTIN) && + !strcmp(dest_keyring->description, ".secondary_trusted_keys") && + !test_bit(KEY_FLAG_BUILTIN, &key->flags)) + ret = -ENOKEY; else ret = verify_signature(key, sig); key_put(key); return ret; } -static bool match_either_id(const struct asymmetric_key_ids *pair, +/** + * restrict_link_by_ca - Restrict additions to a ring of CA keys + * @dest_keyring: Keyring being linked to. + * @type: The type of key being added. + * @payload: The payload of the new key. + * @trust_keyring: Unused. + * + * Check if the new certificate is a CA. If it is a CA, then mark the new + * certificate as being ok to link. + * + * Returns 0 if the new certificate was accepted, -ENOKEY if the + * certificate is not a CA. -ENOPKG if the signature uses unsupported + * crypto, or some other error if there is a matching certificate but + * the signature check cannot be performed. + */ +int restrict_link_by_ca(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring) +{ + const struct public_key *pkey; + + if (type != &key_type_asymmetric) + return -EOPNOTSUPP; + + pkey = payload->data[asym_crypto]; + if (!pkey) + return -ENOPKG; + if (!test_bit(KEY_EFLAG_CA, &pkey->key_eflags)) + return -ENOKEY; + if (!test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags)) + return -ENOKEY; + if (!IS_ENABLED(CONFIG_INTEGRITY_CA_MACHINE_KEYRING_MAX)) + return 0; + if (test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags)) + return -ENOKEY; + + return 0; +} + +/** + * restrict_link_by_digsig - Restrict additions to a ring of digsig keys + * @dest_keyring: Keyring being linked to. + * @type: The type of key being added. + * @payload: The payload of the new key. + * @trust_keyring: A ring of keys that can be used to vouch for the new cert. + * + * Check if the new certificate has digitalSignature usage set. If it is, + * then mark the new certificate as being ok to link. Afterwards verify + * the new certificate against the ones in the trust_keyring. + * + * Returns 0 if the new certificate was accepted, -ENOKEY if the + * certificate is not a digsig. -ENOPKG if the signature uses unsupported + * crypto, or some other error if there is a matching certificate but + * the signature check cannot be performed. 
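Both checkers added here are meant to be plugged into a keyring as link-time restrictions. A hedged usage sketch with an illustrative keyring name; the key_restriction plumbing is the standard keys-API mechanism, but this exact caller is not part of the patch:

#include <crypto/public_key.h>  /* restrict_link_by_ca() */
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/slab.h>

static struct key *make_ca_keyring(void)
{
        struct key_restriction *restriction;

        restriction = kzalloc(sizeof(*restriction), GFP_KERNEL);
        if (!restriction)
                return ERR_PTR(-ENOMEM);

        restriction->check = restrict_link_by_ca;

        return keyring_alloc(".example_ca",     /* illustrative name */
                             GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
                             current_cred(),
                             KEY_POS_ALL | KEY_USR_VIEW,
                             KEY_ALLOC_NOT_IN_QUOTA,
                             restriction, NULL);
}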
+ */ +int restrict_link_by_digsig(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring) +{ + const struct public_key *pkey; + + if (type != &key_type_asymmetric) + return -EOPNOTSUPP; + + pkey = payload->data[asym_crypto]; + + if (!pkey) + return -ENOPKG; + + if (!test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags)) + return -ENOKEY; + + if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags)) + return -ENOKEY; + + if (test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags)) + return -ENOKEY; + + return restrict_link_by_signature(dest_keyring, type, payload, + trust_keyring); +} + +static bool match_either_id(const struct asymmetric_key_id **pair, const struct asymmetric_key_id *single) { - return (asymmetric_key_id_same(pair->id[0], single) || - asymmetric_key_id_same(pair->id[1], single)); + return (asymmetric_key_id_same(pair[0], single) || + asymmetric_key_id_same(pair[1], single)); } static int key_or_keyring_common(struct key *dest_keyring, @@ -140,20 +231,22 @@ static int key_or_keyring_common(struct key *dest_keyring, sig = payload->data[asym_auth]; if (!sig) return -ENOPKG; - if (!sig->auth_ids[0] && !sig->auth_ids[1]) + if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2]) return -ENOKEY; if (trusted) { if (trusted->type == &key_type_keyring) { /* See if we have a key that signed this one. */ key = find_asymmetric_key(trusted, sig->auth_ids[0], - sig->auth_ids[1], false); + sig->auth_ids[1], + sig->auth_ids[2], false); if (IS_ERR(key)) key = NULL; } else if (trusted->type == &key_type_asymmetric) { - const struct asymmetric_key_ids *signer_ids; + const struct asymmetric_key_id **signer_ids; - signer_ids = asymmetric_key_ids(trusted); + signer_ids = (const struct asymmetric_key_id **) + asymmetric_key_ids(trusted)->id; /* * The auth_ids come from the candidate key (the @@ -164,22 +257,29 @@ static int key_or_keyring_common(struct key *dest_keyring, * The signer_ids are identifiers for the * signing key specified for dest_keyring. * - * The first auth_id is the preferred id, and - * the second is the fallback. If only one - * auth_id is present, it may match against - * either signer_id. If two auth_ids are - * present, the first auth_id must match one - * signer_id and the second auth_id must match - * the second signer_id. + * The first auth_id is the preferred id, 2nd and + * 3rd are the fallbacks. If exactly one of + * auth_ids[0] and auth_ids[1] is present, it may + * match either signer_ids[0] or signer_ids[1]. + * If both are present the first one may match + * either signer_id but the second one must match + * the second signer_id. If neither of them is + * available, auth_ids[2] is matched against + * signer_ids[2] as a fallback. */ - if (!sig->auth_ids[0] || !sig->auth_ids[1]) { + if (!sig->auth_ids[0] && !sig->auth_ids[1]) { + if (asymmetric_key_id_same(signer_ids[2], + sig->auth_ids[2])) + key = __key_get(trusted); + + } else if (!sig->auth_ids[0] || !sig->auth_ids[1]) { const struct asymmetric_key_id *auth_id; auth_id = sig->auth_ids[0] ?: sig->auth_ids[1]; if (match_either_id(signer_ids, auth_id)) key = __key_get(trusted); - } else if (asymmetric_key_id_same(signer_ids->id[1], + } else if (asymmetric_key_id_same(signer_ids[1], sig->auth_ids[1]) && match_either_id(signer_ids, sig->auth_ids[0])) { @@ -193,7 +293,8 @@ static int key_or_keyring_common(struct key *dest_keyring, if (check_dest && !key) { /* See if the destination has a key that signed this one.
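The matching rules implemented by key_or_keyring_common() above, summarized case by case:

/*
 * auth_ids present in sig      accepted when
 * -----------------------      -----------------------------------------
 * only [0] (or only [1])       it matches signer_ids[0] or signer_ids[1]
 * both [0] and [1]             [1] == signer_ids[1], [0] matches either
 * neither (only [2])           [2] == signer_ids[2]
 */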
*/ key = find_asymmetric_key(dest_keyring, sig->auth_ids[0], - sig->auth_ids[1], false); + sig->auth_ids[1], sig->auth_ids[2], + false); if (IS_ERR(key)) key = NULL; } diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c new file mode 100644 index 000000000000..98dc5cdfdebe --- /dev/null +++ b/crypto/asymmetric_keys/selftest.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Self-testing for signature checking. + * + * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <crypto/pkcs7.h> +#include <linux/cred.h> +#include <linux/kernel.h> +#include <linux/key.h> +#include <linux/module.h> +#include "selftest.h" +#include "x509_parser.h" + +void fips_signature_selftest(const char *name, + const u8 *keys, size_t keys_len, + const u8 *data, size_t data_len, + const u8 *sig, size_t sig_len) +{ + struct key *keyring; + int ret; + + pr_notice("Running certificate verification %s selftest\n", name); + + keyring = keyring_alloc(".certs_selftest", + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), + (KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ | + KEY_USR_SEARCH, + KEY_ALLOC_NOT_IN_QUOTA, + NULL, NULL); + if (IS_ERR(keyring)) + panic("Can't allocate certs %s selftest keyring: %ld\n", name, PTR_ERR(keyring)); + + ret = x509_load_certificate_list(keys, keys_len, keyring); + if (ret < 0) + panic("Can't load certs into %s selftest keyring: %d\n", name, ret); + + struct pkcs7_message *pkcs7; + + pkcs7 = pkcs7_parse_message(sig, sig_len); + if (IS_ERR(pkcs7)) + panic("Certs %s selftest: pkcs7_parse_message() = %ld\n", name, PTR_ERR(pkcs7)); + + pkcs7_supply_detached_data(pkcs7, data, data_len); + + ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE); + if (ret < 0) + panic("Certs %s selftest: pkcs7_verify() = %d\n", name, ret); + + ret = pkcs7_validate_trust(pkcs7, keyring); + if (ret < 0) + panic("Certs %s selftest: pkcs7_validate_trust() = %d\n", name, ret); + + pkcs7_free_message(pkcs7); + + key_put(keyring); +} + +static int __init fips_signature_selftest_init(void) +{ + fips_signature_selftest_rsa(); + fips_signature_selftest_ecdsa(); + return 0; +} + +late_initcall(fips_signature_selftest_init); + +MODULE_DESCRIPTION("X.509 self tests"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/selftest.h b/crypto/asymmetric_keys/selftest.h new file mode 100644 index 000000000000..4139f05906cb --- /dev/null +++ b/crypto/asymmetric_keys/selftest.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Helper function for self-testing PKCS#7 signature verification.
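The selftest helper above is a compact template for detached PKCS#7 verification in general. The same flow with panic() replaced by ordinary error returns (sketch, not part of the patch):

#include <crypto/pkcs7.h>

static int verify_detached(const u8 *sig, size_t sig_len,
                           const u8 *data, size_t data_len,
                           struct key *keyring)
{
        struct pkcs7_message *pkcs7;
        int ret;

        pkcs7 = pkcs7_parse_message(sig, sig_len);
        if (IS_ERR(pkcs7))
                return PTR_ERR(pkcs7);

        ret = pkcs7_supply_detached_data(pkcs7, data, data_len);
        if (!ret)
                ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
        if (!ret)
                ret = pkcs7_validate_trust(pkcs7, keyring);

        pkcs7_free_message(pkcs7);
        return ret;
}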
+ * + * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com> + */ + +void fips_signature_selftest(const char *name, + const u8 *keys, size_t keys_len, + const u8 *data, size_t data_len, + const u8 *sig, size_t sig_len); + +#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_RSA +void __init fips_signature_selftest_rsa(void); +#else +static inline void __init fips_signature_selftest_rsa(void) { } +#endif + +#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA +void __init fips_signature_selftest_ecdsa(void); +#else +static inline void __init fips_signature_selftest_ecdsa(void) { } +#endif diff --git a/crypto/asymmetric_keys/selftest_ecdsa.c b/crypto/asymmetric_keys/selftest_ecdsa.c new file mode 100644 index 000000000000..20a0868b30de --- /dev/null +++ b/crypto/asymmetric_keys/selftest_ecdsa.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Self-tests for PKCS#7 ECDSA signature verification. + * + * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com> + */ + +#include <linux/module.h> +#include "selftest.h" + +/* + * Set of X.509 certificates to provide public keys for the tests. These will + * be loaded into a temporary keyring for the duration of the testing. + */ +static const u8 certs_selftest_ecdsa_keys[] __initconst = { + /* P-256 ECDSA certificate */ + "\x30\x82\x01\xd4\x30\x82\x01\x7b\xa0\x03\x02\x01\x02\x02\x14\x2e" + "\xea\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0" + "\xf8\x6f\xde\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x30" + "\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74" + "\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61" + "\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d" + "\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32" + "\x34\x30\x34\x31\x33\x32\x32\x31\x36\x32\x36\x5a\x18\x0f\x32\x31" + "\x32\x34\x30\x33\x32\x30\x32\x32\x31\x36\x32\x36\x5a\x30\x3a\x31" + "\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74\x69\x66" + "\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69" + "\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d\x74\x65" + "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x59\x30\x13\x06\x07\x2a" + "\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07" + "\x03\x42\x00\x04\x07\xe5\x6b\x51\xaf\xfc\x19\x41\x2c\x88\x92\x6b" + "\x77\x57\x71\x03\x9e\xe2\xfe\x6e\x6a\x71\x4e\xc7\x29\x9f\x90\xe1" + "\x77\x18\x9f\xc2\xe7\x0a\x82\xd0\x8a\xe1\x81\xa9\x71\x7c\x5a\x73" + "\xfb\x25\xb9\x5b\x1e\x24\x8c\x73\x9f\xf8\x38\xf8\x48\xb4\xad\x16" + "\x19\xc0\x22\xc6\xa3\x5d\x30\x5b\x30\x1d\x06\x03\x55\x1d\x0e\x04" + "\x16\x04\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2\x3d" + "\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x1f\x06\x03\x55\x1d\x23\x04\x18" + "\x30\x16\x80\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2" + "\x3d\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x0c\x06\x03\x55\x1d\x13\x01" + "\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04\x04\x03" + "\x02\x07\x80\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x03" + "\x47\x00\x30\x44\x02\x20\x1a\xd7\xac\x07\xc8\x97\x38\xf4\x89\x43" + "\x7e\xc7\x66\x6e\xa5\x00\x7c\x12\x1d\xb4\x09\x76\x0c\x99\x6b\x8c" + "\x26\x5d\xe9\x70\x5c\xb4\x02\x20\x73\xb7\xc7\x7a\x5a\xdb\x67\x0a" + "\x96\x42\x19\xcf\x4f\x67\x4f\x35\x6a\xee\x29\x25\xf2\x4f\xc8\x10" + "\x14\x9d\x79\x69\x1c\x7a\xd7\x5d" +}; + +/* + * Signed data and detached signature blobs that form the verification tests. 
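A note on the blob definitions that follow: each is written as a C string literal, so the compiler appends a NUL byte that is not part of the DER data. That is why every call site passes sizeof(...) - 1. In miniature (hypothetical names):

static const u8 der_start[] = "\x30\x82";       /* sizeof == 3, DER length == 2 */
#define DER_LEN(blob)   (sizeof(blob) - 1)      /* illustrative convenience macro */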
+ */ +static const u8 certs_selftest_ecdsa_data[] __initconst = { + "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" + "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x2e\x0a" +}; + +static const u8 certs_selftest_ecdsa_sig[] __initconst = { + /* ECDSA signature using SHA-256 */ + "\x30\x81\xf4\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0\x81" + "\xe6\x30\x81\xe3\x02\x01\x01\x31\x0f\x30\x0d\x06\x09\x60\x86\x48" + "\x01\x65\x03\x04\x02\x01\x05\x00\x30\x0b\x06\x09\x2a\x86\x48\x86" + "\xf7\x0d\x01\x07\x01\x31\x81\xbf\x30\x81\xbc\x02\x01\x01\x30\x52" + "\x30\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66" + "\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x2e\xea" + "\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0\xf8" + "\x6f\xde\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05" + "\x00\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x04\x48\x30" + "\x46\x02\x21\x00\x86\xd1\xf4\x06\xb6\x49\x79\xf9\x09\x5f\x35\x1a" + "\x94\x7e\x0e\x1a\x12\x4d\xd9\xe6\x2a\x2d\xcf\x2d\x0a\xee\x88\x76" + "\xe0\x35\xf3\xeb\x02\x21\x00\xdf\x11\x8a\xab\x31\xf6\x3c\x1f\x32" + "\x43\x94\xe2\xb8\x35\xc9\xf3\x12\x4e\x9b\x31\x08\x10\x5d\x8d\xe2" + "\x43\x0a\x5f\xf5\xfd\xa2\xf1" +}; + +void __init fips_signature_selftest_ecdsa(void) +{ + fips_signature_selftest("ECDSA", + certs_selftest_ecdsa_keys, + sizeof(certs_selftest_ecdsa_keys) - 1, + certs_selftest_ecdsa_data, + sizeof(certs_selftest_ecdsa_data) - 1, + certs_selftest_ecdsa_sig, + sizeof(certs_selftest_ecdsa_sig) - 1); +} diff --git a/crypto/asymmetric_keys/selftest_rsa.c b/crypto/asymmetric_keys/selftest_rsa.c new file mode 100644 index 000000000000..09c9815e456a --- /dev/null +++ b/crypto/asymmetric_keys/selftest_rsa.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Self-tests for PKCS#7 RSA signature verification. + * + * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com> + */ + +#include <linux/module.h> +#include "selftest.h" + +/* + * Set of X.509 certificates to provide public keys for the tests. These will + * be loaded into a temporary keyring for the duration of the testing. 
+ */ +static const u8 certs_selftest_rsa_keys[] __initconst = { + /* 4096-bit RSA certificate */ + "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73" + "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a" + "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b" + "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43" + "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66" + "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73" + "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35" + "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30" + "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30" + "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61" + "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79" + "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01" + "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01" + "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f" + "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99" + "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92" + "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77" + "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55" + "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d" + "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44" + "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01" + "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7" + "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68" + "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6" + "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81" + "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78" + "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6" + "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f" + "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62" + "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94" + "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48" + "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c" + "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45" + "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a" + "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68" + "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09" + "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95" + "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec" + "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe" + "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42" + "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35" + "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4" + "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f" + "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a" + "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc" + "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d" + "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04" + "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14" + "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17" + 
"\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80" + "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88" + "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01" + "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85" + "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47" + "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88" + "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12" + "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f" + "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71" + "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b" + "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56" + "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f" + "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02" + "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52" + "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a" + "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99" + "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa" + "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87" + "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13" + "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f" + "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66" + "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07" + "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05" + "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04" + "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c" + "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb" + "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48" + "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d" + "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4" + "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd" + "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71" + "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70" + "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46" + "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0" + "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62" + "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3" +}; + +/* + * Signed data and detached signature blobs that form the verification tests. 
+ */ +static const u8 certs_selftest_rsa_data[] __initconst = { + "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" + "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x2e\x0a" +}; + +static const u8 certs_selftest_rsa_sig[] __initconst = { + /* RSA signature using PKCS#1 v1.5 padding with SHA-256 */ + "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0" + "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09" + "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48" + "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01" + "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29" + "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69" + "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65" + "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d" + "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30" + "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09" + "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac" + "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3" + "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c" + "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00" + "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb" + "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11" + "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33" + "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23" + "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37" + "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab" + "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c" + "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41" + "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6" + "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72" + "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87" + "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4" + "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92" + "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79" + "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b" + "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0" + "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f" + "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e" + "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31" + "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24" + "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14" + "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe" + "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8" + "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e" + "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36" + "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb" + "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64" + "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51" + "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2" +}; + +void __init fips_signature_selftest_rsa(void) +{ + fips_signature_selftest("RSA", + 
certs_selftest_rsa_keys, + sizeof(certs_selftest_rsa_keys) - 1, + certs_selftest_rsa_data, + sizeof(certs_selftest_rsa_data) - 1, + certs_selftest_rsa_sig, + sizeof(certs_selftest_rsa_sig) - 1); +} diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 4aff3eebec17..041d04b5c953 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -35,7 +35,7 @@ void public_key_signature_free(struct public_key_signature *sig) EXPORT_SYMBOL_GPL(public_key_signature_free); /** - * query_asymmetric_key - Get information about an aymmetric key. + * query_asymmetric_key - Get information about an asymmetric key. * @params: Various parameters. * @info: Where to put the information. */ @@ -65,69 +65,6 @@ int query_asymmetric_key(const struct kernel_pkey_params *params, EXPORT_SYMBOL_GPL(query_asymmetric_key); /** - * encrypt_blob - Encrypt data using an asymmetric key - * @params: Various parameters - * @data: Data blob to be encrypted, length params->data_len - * @enc: Encrypted data buffer, length params->enc_len - * - * Encrypt the specified data blob using the private key specified by - * params->key. The encrypted data is wrapped in an encoding if - * params->encoding is specified (eg. "pkcs1"). - * - * Returns the length of the data placed in the encrypted data buffer or an - * error. - */ -int encrypt_blob(struct kernel_pkey_params *params, - const void *data, void *enc) -{ - params->op = kernel_pkey_encrypt; - return asymmetric_key_eds_op(params, data, enc); -} -EXPORT_SYMBOL_GPL(encrypt_blob); - -/** - * decrypt_blob - Decrypt data using an asymmetric key - * @params: Various parameters - * @enc: Encrypted data to be decrypted, length params->enc_len - * @data: Decrypted data buffer, length params->data_len - * - * Decrypt the specified data blob using the private key specified by - * params->key. The decrypted data is wrapped in an encoding if - * params->encoding is specified (eg. "pkcs1"). - * - * Returns the length of the data placed in the decrypted data buffer or an - * error. - */ -int decrypt_blob(struct kernel_pkey_params *params, - const void *enc, void *data) -{ - params->op = kernel_pkey_decrypt; - return asymmetric_key_eds_op(params, enc, data); -} -EXPORT_SYMBOL_GPL(decrypt_blob); - -/** - * create_signature - Sign some data using an asymmetric key - * @params: Various parameters - * @data: Data blob to be signed, length params->data_len - * @enc: Signature buffer, length params->enc_len - * - * Sign the specified data blob using the private key specified by params->key. - * The signature is wrapped in an encoding if params->encoding is specified - * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be - * passed through params->hash_algo (eg. "sha1"). - * - * Returns the length of the data placed in the signature buffer or an error. - */ -int create_signature(struct kernel_pkey_params *params, - const void *data, void *enc) -{ - params->op = kernel_pkey_sign; - return asymmetric_key_eds_op(params, data, enc); -} -EXPORT_SYMBOL_GPL(create_signature); - -/** * verify_signature - Initiate the use of an asymmetric key to verify a signature * @key: The asymmetric key to verify against * @sig: The signature to check diff --git a/crypto/asymmetric_keys/tpm.asn1 b/crypto/asymmetric_keys/tpm.asn1 deleted file mode 100644 index d7f194232f30..000000000000 --- a/crypto/asymmetric_keys/tpm.asn1 +++ /dev/null @@ -1,5 +0,0 @@ --- --- Unencryted TPM Blob. 
For details of the format, see: --- http://david.woodhou.se/draft-woodhouse-cert-best-practice.html#I-D.mavrogiannopoulos-tpmuri --- -PrivateKeyInfo ::= OCTET STRING ({ tpm_note_key }) diff --git a/crypto/asymmetric_keys/tpm_parser.c b/crypto/asymmetric_keys/tpm_parser.c deleted file mode 100644 index 96405d8dcd98..000000000000 --- a/crypto/asymmetric_keys/tpm_parser.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#define pr_fmt(fmt) "TPM-PARSER: "fmt -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <keys/asymmetric-subtype.h> -#include <keys/asymmetric-parser.h> -#include <crypto/asym_tpm_subtype.h> -#include "tpm.asn1.h" - -struct tpm_parse_context { - const void *blob; - u32 blob_len; -}; - -/* - * Note the key data of the ASN.1 blob. - */ -int tpm_note_key(void *context, size_t hdrlen, - unsigned char tag, - const void *value, size_t vlen) -{ - struct tpm_parse_context *ctx = context; - - ctx->blob = value; - ctx->blob_len = vlen; - - return 0; -} - -/* - * Parse a TPM-encrypted private key blob. - */ -static struct tpm_key *tpm_parse(const void *data, size_t datalen) -{ - struct tpm_parse_context ctx; - long ret; - - memset(&ctx, 0, sizeof(ctx)); - - /* Attempt to decode the private key */ - ret = asn1_ber_decoder(&tpm_decoder, &ctx, data, datalen); - if (ret < 0) - goto error; - - return tpm_key_create(ctx.blob, ctx.blob_len); - -error: - return ERR_PTR(ret); -} -/* - * Attempt to parse a data blob for a key as a TPM private key blob. - */ -static int tpm_key_preparse(struct key_preparsed_payload *prep) -{ - struct tpm_key *tk; - - /* - * TPM 1.2 keys are max 2048 bits long, so assume the blob is no - * more than 4x that - */ - if (prep->datalen > 256 * 4) - return -EMSGSIZE; - - tk = tpm_parse(prep->data, prep->datalen); - - if (IS_ERR(tk)) - return PTR_ERR(tk); - - /* We're pinning the module by being linked against it */ - __module_get(asym_tpm_subtype.owner); - prep->payload.data[asym_subtype] = &asym_tpm_subtype; - prep->payload.data[asym_key_ids] = NULL; - prep->payload.data[asym_crypto] = tk; - prep->payload.data[asym_auth] = NULL; - prep->quotalen = 100; - return 0; -} - -static struct asymmetric_key_parser tpm_key_parser = { - .owner = THIS_MODULE, - .name = "tpm_parser", - .parse = tpm_key_preparse, -}; - -static int __init tpm_key_init(void) -{ - return register_asymmetric_key_parser(&tpm_key_parser); -} - -static void __exit tpm_key_exit(void) -{ - unregister_asymmetric_key_parser(&tpm_key_parser); -} - -module_init(tpm_key_init); -module_exit(tpm_key_exit); - -MODULE_DESCRIPTION("TPM private key-blob parser"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index 7553ab18db89..1f3b227ba7f2 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -28,7 +28,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, const struct pe32plus_opt_hdr *pe64; const struct data_directory *ddir; const struct data_dirent *dde; - const struct section_header *secs, *sec; + const struct section_header *sec; size_t cursor, datalen = pelen; kenter(""); @@ -40,13 +40,13 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, } while (0) chkaddr(0, 0, sizeof(*mz)); - if (mz->magic != MZ_MAGIC) + if (mz->magic != IMAGE_DOS_SIGNATURE) return -ELIBBAD; cursor = sizeof(*mz); chkaddr(cursor, mz->peaddr, sizeof(*pe)); pe = pebuf + 
mz->peaddr; - if (pe->magic != PE_MAGIC) + if (pe->magic != IMAGE_NT_SIGNATURE) return -ELIBBAD; cursor = mz->peaddr + sizeof(*pe); @@ -55,7 +55,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, pe64 = pebuf + cursor; switch (pe32->magic) { - case PE_OPT_MAGIC_PE32: + case IMAGE_NT_OPTIONAL_HDR32_MAGIC: chkaddr(0, cursor, sizeof(*pe32)); ctx->image_checksum_offset = (unsigned long)&pe32->csum - (unsigned long)pebuf; @@ -64,7 +64,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, ctx->n_data_dirents = pe32->data_dirs; break; - case PE_OPT_MAGIC_PE32PLUS: + case IMAGE_NT_OPTIONAL_HDR64_MAGIC: chkaddr(0, cursor, sizeof(*pe64)); ctx->image_checksum_offset = (unsigned long)&pe64->csum - (unsigned long)pebuf; @@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, break; default: - pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic); + pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic); return -ELIBBAD; } @@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, ctx->certs_size = ddir->certs.size; if (!ddir->certs.virtual_address || !ddir->certs.size) { - pr_debug("Unsigned PE binary\n"); + pr_warn("Unsigned PE binary\n"); return -ENODATA; } @@ -110,7 +110,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, ctx->n_sections = pe->sections; if (ctx->n_sections > (ctx->header_size - cursor) / sizeof(*sec)) return -ELIBBAD; - ctx->secs = secs = pebuf + cursor; + ctx->secs = pebuf + cursor; return 0; } @@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, unsigned len; if (ctx->sig_len < sizeof(wrapper)) { - pr_debug("Signature wrapper too short\n"); + pr_warn("Signature wrapper too short\n"); return -ELIBBAD; } @@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf, pr_debug("sig wrapper = { %x, %x, %x }\n", wrapper.length, wrapper.revision, wrapper.cert_type); - /* Both pesign and sbsign round up the length of certificate table - * (in optional header data directories) to 8 byte alignment. + /* sbsign rounds up the length of certificate table (in optional + * header data directories) to 8 byte alignment. However, the PE + * specification states that while entries are 8-byte aligned, this is + * not included in their length, and as a result, pesign has not + * rounded up since 0.110. 
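+ *
+ * Accept both layouts by rejecting only a wrapper that claims more
+ * bytes than the certificate table actually holds; schematically:
+ *
+ *	|<------------ ctx->sig_len ------------>|
+ *	|<---- wrapper.length ---->|  0..7 pad   |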
*/ - if (round_up(wrapper.length, 8) != ctx->sig_len) { - pr_debug("Signature wrapper len wrong\n"); + if (wrapper.length > ctx->sig_len) { + pr_warn("Signature wrapper bigger than sig len (%x > %x)\n", + wrapper.length, ctx->sig_len); return -ELIBBAD; } if (wrapper.revision != WIN_CERT_REVISION_2_0) { - pr_debug("Signature is not revision 2.0\n"); + pr_warn("Signature is not revision 2.0\n"); return -ENOTSUPP; } if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) { - pr_debug("Signature certificate type is not PKCS\n"); + pr_warn("Signature certificate type is not PKCS\n"); return -ENOTSUPP; } @@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, ctx->sig_offset += sizeof(wrapper); ctx->sig_len -= sizeof(wrapper); if (ctx->sig_len < 4) { - pr_debug("Signature data missing\n"); + pr_warn("Signature data missing\n"); return -EKEYREJECTED; } @@ -194,7 +198,7 @@ check_len: return 0; } not_pkcs7: - pr_debug("Signature data not PKCS#7\n"); + pr_warn("Signature data not PKCS#7\n"); return -ELIBBAD; } @@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, digest_size = crypto_shash_digestsize(tfm); if (digest_size != ctx->digest_len) { - pr_debug("Digest size mismatch (%zx != %x)\n", - digest_size, ctx->digest_len); + pr_warn("Digest size mismatch (%zx != %x)\n", + digest_size, ctx->digest_len); ret = -EBADMSG; goto error_no_desc; } @@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, * PKCS#7 certificate. */ if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) { - pr_debug("Digest mismatch\n"); + pr_warn("Digest mismatch\n"); ret = -EKEYREJECTED; } else { pr_debug("The digests match!\n"); @@ -387,7 +391,7 @@ error_no_desc: * verify_pefile_signature - Verify the signature on a PE binary image * @pebuf: Buffer containing the PE binary image * @pelen: Length of the binary image - * @trust_keys: Signing certificate(s) to use as starting points + * @trusted_keys: Signing certificate(s) to use as starting points * @usage: The use to which the key is being put. 
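 *
 * A typical in-tree use is verifying a kexec'ed kernel image, along the
 * lines of this sketch (the x86 kexec bzImage loader is the real caller):
 *
 *	ret = verify_pefile_signature(kernel_buf, kernel_len,
 *				      VERIFY_USE_SECONDARY_KEYRING,
 *				      VERIFYING_KEXEC_PE_SIGNATURE);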
* * Validate that the certificate chain inside the PKCS#7 message inside the PE diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1 index 5c9f4e4a5231..feb9573cacce 100644 --- a/crypto/asymmetric_keys/x509.asn1 +++ b/crypto/asymmetric_keys/x509.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2008 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5280#section-4 + Certificate ::= SEQUENCE { tbsCertificate TBSCertificate ({ x509_note_tbs_certificate }), signatureAlgorithm AlgorithmIdentifier, @@ -7,7 +14,7 @@ Certificate ::= SEQUENCE { TBSCertificate ::= SEQUENCE { version [ 0 ] Version DEFAULT, serialNumber CertificateSerialNumber ({ x509_note_serial }), - signature AlgorithmIdentifier ({ x509_note_pkey_algo }), + signature AlgorithmIdentifier ({ x509_note_sig_algo }), issuer Name ({ x509_note_issuer }), validity Validity, subject Name ({ x509_note_subject }), diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1 index 1a33231a75a8..0f8355cf1907 100644 --- a/crypto/asymmetric_keys/x509_akid.asn1 +++ b/crypto/asymmetric_keys/x509_akid.asn1 @@ -1,3 +1,8 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2008 IETF Trust and the persons identified as authors +-- of the code +-- -- X.509 AuthorityKeyIdentifier -- rfc5280 section 4.2.1.1 @@ -14,15 +19,15 @@ CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial }) GeneralNames ::= SEQUENCE OF GeneralName GeneralName ::= CHOICE { - otherName [0] ANY, - rfc822Name [1] IA5String, - dNSName [2] IA5String, + otherName [0] IMPLICIT OtherName, + rfc822Name [1] IMPLICIT IA5String, + dNSName [2] IMPLICIT IA5String, x400Address [3] ANY, directoryName [4] Name ({ x509_akid_note_name }), - ediPartyName [5] ANY, - uniformResourceIdentifier [6] IA5String, - iPAddress [7] OCTET STRING, - registeredID [8] OBJECT IDENTIFIER + ediPartyName [5] IMPLICIT EDIPartyName, + uniformResourceIdentifier [6] IMPLICIT IA5String, + iPAddress [7] IMPLICIT OCTET STRING, + registeredID [8] IMPLICIT OBJECT IDENTIFIER } Name ::= SEQUENCE OF RelativeDistinguishedName @@ -33,3 +38,13 @@ AttributeValueAssertion ::= SEQUENCE { attributeType OBJECT IDENTIFIER ({ x509_note_OID }), attributeValue ANY ({ x509_extract_name_segment }) } + +OtherName ::= SEQUENCE { + type-id OBJECT IDENTIFIER, + value [0] ANY + } + +EDIPartyName ::= SEQUENCE { + nameAssigner [0] ANY OPTIONAL, + partyName [1] ANY + } diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 6d003096b5bc..b37cae914987 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -19,15 +19,13 @@ struct x509_parse_context { struct x509_certificate *cert; /* Certificate being constructed */ unsigned long data; /* Start of data */ - const void *cert_start; /* Start of cert content */ const void *key; /* Key data */ size_t key_size; /* Size of key data */ const void *params; /* Key parameters */ size_t params_size; /* Size of key parameters */ - enum OID key_algo; /* Public key algorithm */ + enum OID key_algo; /* Algorithm used by the cert's key */ enum OID last_oid; /* Last OID encountered */ - enum OID algo_oid; /* Algorithm OID */ - unsigned char nr_mpi; /* Number of MPIs stored */ + enum OID sig_algo; /* Algorithm used to sign the cert */ u8 o_size; /* Size of organizationName (O) */ u8 cn_size; /* Size of commonName (CN) */ u8 email_size; /* Size of 
emailAddress */ @@ -62,24 +60,23 @@ EXPORT_SYMBOL_GPL(x509_free_certificate); */ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) { - struct x509_certificate *cert; - struct x509_parse_context *ctx; + struct x509_certificate *cert __free(x509_free_certificate) = NULL; + struct x509_parse_context *ctx __free(kfree) = NULL; struct asymmetric_key_id *kid; long ret; - ret = -ENOMEM; cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL); if (!cert) - goto error_no_cert; + return ERR_PTR(-ENOMEM); cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL); if (!cert->pub) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL); if (!cert->sig) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL); if (!ctx) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); ctx->cert = cert; ctx->data = (unsigned long)data; @@ -87,7 +84,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) /* Attempt to decode the certificate */ ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); /* Decode the AuthorityKeyIdentifier */ if (ctx->raw_akid) { @@ -97,20 +94,19 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) ctx->raw_akid, ctx->raw_akid_size); if (ret < 0) { pr_warn("Couldn't decode AuthKeyIdentifier\n"); - goto error_decode; + return ERR_PTR(ret); } } - ret = -ENOMEM; cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); if (!cert->pub->key) - goto error_decode; + return ERR_PTR(-ENOMEM); cert->pub->keylen = ctx->key_size; cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL); if (!cert->pub->params) - goto error_decode; + return ERR_PTR(-ENOMEM); cert->pub->paramlen = ctx->params_size; cert->pub->algo = ctx->key_algo; @@ -118,33 +114,23 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) /* Grab the signature bits */ ret = x509_get_sig_params(cert); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); /* Generate cert issuer + serial number key ID */ kid = asymmetric_key_generate_id(cert->raw_serial, cert->raw_serial_size, cert->raw_issuer, cert->raw_issuer_size); - if (IS_ERR(kid)) { - ret = PTR_ERR(kid); - goto error_decode; - } + if (IS_ERR(kid)) + return ERR_CAST(kid); cert->id = kid; /* Detect self-signed certificates */ ret = x509_check_for_self_signed(cert); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); - kfree(ctx); - return cert; - -error_decode: - kfree(ctx); -error_no_ctx: - x509_free_certificate(cert); -error_no_cert: - return ERR_PTR(ret); + return_ptr(cert); } EXPORT_SYMBOL_GPL(x509_cert_parse); @@ -187,26 +173,19 @@ int x509_note_tbs_certificate(void *context, size_t hdrlen, } /* - * Record the public key algorithm + * Record the algorithm that was used to sign this certificate. 
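+ *
+ * Each supported OID is folded into a (pkey_algo, hash_algo, encoding)
+ * triple on the signature, e.g. from the switch below:
+ *
+ *	OID_sha1WithRSAEncryption  -> ("rsa",   "sha1",     "pkcs1")
+ *	OID_id_ecdsa_with_sha3_256 -> ("ecdsa", "sha3-256", "x962")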
*/ -int x509_note_pkey_algo(void *context, size_t hdrlen, - unsigned char tag, - const void *value, size_t vlen) +int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) { struct x509_parse_context *ctx = context; pr_debug("PubKey Algo: %u\n", ctx->last_oid); switch (ctx->last_oid) { - case OID_md2WithRSAEncryption: - case OID_md3WithRSAEncryption: default: return -ENOPKG; /* Unsupported combination */ - case OID_md4WithRSAEncryption: - ctx->cert->sig->hash_algo = "md4"; - goto rsa_pkcs1; - case OID_sha1WithRSAEncryption: ctx->cert->sig->hash_algo = "sha1"; goto rsa_pkcs1; @@ -231,6 +210,18 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, ctx->cert->sig->hash_algo = "sha1"; goto ecdsa; + case OID_id_rsassa_pkcs1_v1_5_with_sha3_256: + ctx->cert->sig->hash_algo = "sha3-256"; + goto rsa_pkcs1; + + case OID_id_rsassa_pkcs1_v1_5_with_sha3_384: + ctx->cert->sig->hash_algo = "sha3-384"; + goto rsa_pkcs1; + + case OID_id_rsassa_pkcs1_v1_5_with_sha3_512: + ctx->cert->sig->hash_algo = "sha3-512"; + goto rsa_pkcs1; + case OID_id_ecdsa_with_sha224: ctx->cert->sig->hash_algo = "sha224"; goto ecdsa; @@ -247,6 +238,18 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, ctx->cert->sig->hash_algo = "sha512"; goto ecdsa; + case OID_id_ecdsa_with_sha3_256: + ctx->cert->sig->hash_algo = "sha3-256"; + goto ecdsa; + + case OID_id_ecdsa_with_sha3_384: + ctx->cert->sig->hash_algo = "sha3-384"; + goto ecdsa; + + case OID_id_ecdsa_with_sha3_512: + ctx->cert->sig->hash_algo = "sha3-512"; + goto ecdsa; + case OID_gost2012Signature256: ctx->cert->sig->hash_algo = "streebog256"; goto ecrdsa; @@ -254,31 +257,22 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, case OID_gost2012Signature512: ctx->cert->sig->hash_algo = "streebog512"; goto ecrdsa; - - case OID_SM2_with_SM3: - ctx->cert->sig->hash_algo = "sm3"; - goto sm2; } rsa_pkcs1: ctx->cert->sig->pkey_algo = "rsa"; ctx->cert->sig->encoding = "pkcs1"; - ctx->algo_oid = ctx->last_oid; + ctx->sig_algo = ctx->last_oid; return 0; ecrdsa: ctx->cert->sig->pkey_algo = "ecrdsa"; ctx->cert->sig->encoding = "raw"; - ctx->algo_oid = ctx->last_oid; - return 0; -sm2: - ctx->cert->sig->pkey_algo = "sm2"; - ctx->cert->sig->encoding = "raw"; - ctx->algo_oid = ctx->last_oid; + ctx->sig_algo = ctx->last_oid; return 0; ecdsa: ctx->cert->sig->pkey_algo = "ecdsa"; ctx->cert->sig->encoding = "x962"; - ctx->algo_oid = ctx->last_oid; + ctx->sig_algo = ctx->last_oid; return 0; } @@ -291,17 +285,21 @@ int x509_note_signature(void *context, size_t hdrlen, { struct x509_parse_context *ctx = context; - pr_debug("Signature type: %u size %zu\n", ctx->last_oid, vlen); + pr_debug("Signature: alg=%u, size=%zu\n", ctx->last_oid, vlen); - if (ctx->last_oid != ctx->algo_oid) { - pr_warn("Got cert with pkey (%u) and sig (%u) algorithm OIDs\n", - ctx->algo_oid, ctx->last_oid); + /* + * In X.509 certificates, the signature's algorithm is stored in two + * places: inside the TBSCertificate (the data that is signed), and + * alongside the signature. These *must* match. 
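+ * Enforcing the match (RFC 5280, secs. 4.1.1.2 and 4.1.2.3) keeps an
+ * attacker from substituting a weaker algorithm in the unsigned outer
+ * field than the one actually covered by the signature.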
+ */ + if (ctx->last_oid != ctx->sig_algo) { + pr_warn("signatureAlgorithm (%u) differs from tbsCertificate.signature (%u)\n", + ctx->last_oid, ctx->sig_algo); return -EINVAL; } if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 || strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 || - strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 || strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) { /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) @@ -374,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen, /* Empty name string if no material */ if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) { - buffer = kmalloc(1, GFP_KERNEL); + buffer = kzalloc(1, GFP_KERNEL); if (!buffer) return -ENOMEM; - buffer[0] = 0; goto done; } @@ -441,8 +438,18 @@ int x509_note_issuer(void *context, size_t hdrlen, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; + struct asymmetric_key_id *kid; + ctx->cert->raw_issuer = value; ctx->cert->raw_issuer_size = vlen; + + if (!ctx->cert->sig->auth_ids[2]) { + kid = asymmetric_key_generate_id(value, vlen, "", 0); + if (IS_ERR(kid)) + return PTR_ERR(kid); + ctx->cert->sig->auth_ids[2] = kid; + } + return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->issuer, vlen); } @@ -501,9 +508,6 @@ int x509_extract_key_data(void *context, size_t hdrlen, return -EBADMSG; switch (oid) { - case OID_sm2: - ctx->cert->pub->pkey_algo = "sm2"; - break; case OID_id_prime192v1: ctx->cert->pub->pkey_algo = "ecdsa-nist-p192"; break; @@ -513,6 +517,9 @@ int x509_extract_key_data(void *context, size_t hdrlen, case OID_id_ansip384r1: ctx->cert->pub->pkey_algo = "ecdsa-nist-p384"; break; + case OID_id_ansip521r1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p521"; + break; default: return -ENOPKG; } @@ -564,6 +571,34 @@ int x509_process_extension(void *context, size_t hdrlen, return 0; } + if (ctx->last_oid == OID_keyUsage) { + /* + * Get hold of the keyUsage bit string + * v[1] is the encoding size + * (Expect either 0x02 or 0x03, making it 1 or 2 bytes) + * v[2] is the number of unused bits in the bit string + * (If >= 3 keyCertSign is missing when v[1] = 0x02) + * v[3] and possibly v[4] contain the bit string + * + * From RFC 5280 4.2.1.3: + * 0x04 is where keyCertSign lands in this bit string + * 0x80 is where digitalSignature lands in this bit string + */ + if (v[0] != ASN1_BTS) + return -EBADMSG; + if (vlen < 4) + return -EBADMSG; + if (v[2] >= 8) + return -EBADMSG; + if (v[3] & 0x80) + ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_DIGITALSIG; + if (v[1] == 0x02 && v[2] <= 2 && (v[3] & 0x04)) + ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN; + else if (vlen > 4 && v[1] == 0x03 && (v[3] & 0x04)) + ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN; + return 0; + } + if (ctx->last_oid == OID_authorityKeyIdentifier) { /* Get hold of the CA key fingerprint */ ctx->raw_akid = v; @@ -571,6 +606,36 @@ int x509_process_extension(void *context, size_t hdrlen, return 0; } + if (ctx->last_oid == OID_basicConstraints) { + /* + * Get hold of the basicConstraints + * v[1] is the encoding size + * (Expect 0x00 for empty SEQUENCE with CA:FALSE, or + * 0x03 or greater for non-empty SEQUENCE) + * v[2] is the encoding type + * (Expect an ASN1_BOOL for the CA) + * v[3] is the length of the ASN1_BOOL + * (Expect 1 for a single byte boolean) + * v[4] is the contents of the ASN1_BOOL + * (Expect 0xFF if the CA is TRUE) + * vlen should match the entire extension size + */ + if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ)) + 
return -EBADMSG; + if (vlen < 2) + return -EBADMSG; + if (v[1] != vlen - 2) + return -EBADMSG; + /* Empty SEQUENCE means CA:FALSE (default value omitted per DER) */ + if (v[1] == 0) + return 0; + if (vlen >= 5 && v[2] == ASN1_BOOL && v[3] == 1 && v[4] == 0xFF) + ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA; + else + return -EBADMSG; + return 0; + } + return 0; } diff --git a/crypto/asymmetric_keys/x509_loader.c b/crypto/asymmetric_keys/x509_loader.c new file mode 100644 index 000000000000..a41741326998 --- /dev/null +++ b/crypto/asymmetric_keys/x509_loader.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/kernel.h> +#include <linux/key.h> +#include <keys/asymmetric-type.h> + +int x509_load_certificate_list(const u8 cert_list[], + const unsigned long list_size, + const struct key *keyring) +{ + key_ref_t key; + const u8 *p, *end; + size_t plen; + + p = cert_list; + end = p + list_size; + while (p < end) { + /* Each cert begins with an ASN.1 SEQUENCE tag and must be more + * than 256 bytes in size. + */ + if (end - p < 4) + goto dodgy_cert; + if (p[0] != 0x30 && + p[1] != 0x82) + goto dodgy_cert; + plen = (p[2] << 8) | p[3]; + plen += 4; + if (plen > end - p) + goto dodgy_cert; + + key = key_create_or_update(make_key_ref(keyring, 1), + "asymmetric", + NULL, + p, + plen, + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_BUILT_IN | + KEY_ALLOC_BYPASS_RESTRICTION); + if (IS_ERR(key)) { + pr_err("Problem loading in-kernel X.509 certificate (%ld)\n", + PTR_ERR(key)); + } else { + pr_notice("Loaded X.509 cert '%s'\n", + key_ref_to_ptr(key)->description); + key_ref_put(key); + } + p += plen; + } + + return 0; + +dodgy_cert: + pr_err("Problem parsing in-kernel X.509 certificate list\n"); + return 0; +} +EXPORT_SYMBOL_GPL(x509_load_certificate_list); diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index c233f136fb35..0688c222806b 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -5,6 +5,7 @@ * Written by David Howells (dhowells@redhat.com) */ +#include <linux/cleanup.h> #include <linux/time.h> #include <crypto/public_key.h> #include <keys/asymmetric-type.h> @@ -22,7 +23,7 @@ struct x509_certificate { time64_t valid_to; const void *tbs; /* Signed data */ unsigned tbs_size; /* Size of signed data */ - unsigned raw_sig_size; /* Size of sigature */ + unsigned raw_sig_size; /* Size of signature */ const void *raw_sig; /* Signature data */ const void *raw_serial; /* Raw serial number in ASN.1 */ unsigned raw_serial_size; @@ -36,7 +37,6 @@ struct x509_certificate { bool seen; /* Infinite recursion prevention */ bool verified; bool self_signed; /* T if self-signed (check unsupported_sig too) */ - bool unsupported_key; /* T if key uses unsupported crypto */ bool unsupported_sig; /* T if signature uses unsupported crypto */ bool blacklisted; }; @@ -45,6 +45,8 @@ struct x509_certificate { * x509_cert_parser.c */ extern void x509_free_certificate(struct x509_certificate *cert); +DEFINE_FREE(x509_free_certificate, struct x509_certificate *, + if (!IS_ERR(_T)) x509_free_certificate(_T)) extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); extern int x509_decode_time(time64_t *_t, size_t hdrlen, unsigned char tag, diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 3d45161b271a..12e3341e806b 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ 
b/crypto/asymmetric_keys/x509_public_key.c @@ -6,13 +6,14 @@ */ #define pr_fmt(fmt) "X.509: "fmt +#include <crypto/hash.h> +#include <keys/asymmetric-parser.h> +#include <keys/asymmetric-subtype.h> +#include <keys/system_keyring.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <keys/asymmetric-subtype.h> -#include <keys/asymmetric-parser.h> -#include <keys/system_keyring.h> -#include <crypto/hash.h> +#include <linux/string.h> #include "asymmetric_keys.h" #include "x509_parser.h" @@ -30,21 +31,6 @@ int x509_get_sig_params(struct x509_certificate *cert) pr_devel("==>%s()\n", __func__); - sig->data = cert->tbs; - sig->data_size = cert->tbs_size; - - if (!cert->pub->pkey_algo) - cert->unsupported_key = true; - - if (!sig->pkey_algo) - cert->unsupported_sig = true; - - /* We check the hash if we can - even if we can't then verify it */ - if (!sig->hash_algo) { - cert->unsupported_sig = true; - return 0; - } - sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL); if (!sig->s) return -ENOMEM; @@ -77,11 +63,14 @@ int x509_get_sig_params(struct x509_certificate *cert) desc->tfm = tfm; - ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest); + ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, + sig->digest); + if (ret < 0) goto error_2; - ret = is_hash_blacklisted(sig->digest, sig->digest_size, "tbs"); + ret = is_hash_blacklisted(sig->digest, sig->digest_size, + BLACKLIST_HASH_X509_TBS); if (ret == -EKEYREJECTED) { pr_err("Cert %*phN is blacklisted\n", sig->digest_size, sig->digest); @@ -128,11 +117,10 @@ int x509_check_for_self_signed(struct x509_certificate *cert) goto out; } - ret = -EKEYREJECTED; - if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 && - (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 || - strcmp(cert->sig->pkey_algo, "ecdsa") != 0)) + if (cert->unsupported_sig) { + ret = 0; goto out; + } ret = public_key_verify_signature(cert->pub, cert->sig); if (ret < 0) { @@ -160,12 +148,11 @@ not_self_signed: */ static int x509_key_preparse(struct key_preparsed_payload *prep) { - struct asymmetric_key_ids *kids; - struct x509_certificate *cert; + struct x509_certificate *cert __free(x509_free_certificate) = NULL; + struct asymmetric_key_ids *kids __free(kfree) = NULL; + char *p, *desc __free(kfree) = NULL; const char *q; size_t srlen, sulen; - char *desc = NULL, *p; - int ret; cert = x509_cert_parse(prep->data, prep->datalen); if (IS_ERR(cert)) @@ -173,12 +160,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) pr_devel("Cert Issuer: %s\n", cert->issuer); pr_devel("Cert Subject: %s\n", cert->subject); - - if (cert->unsupported_key) { - ret = -ENOPKG; - goto error_free_cert; - } - pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo); pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to); @@ -193,9 +174,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) } /* Don't permit addition of blacklisted keys */ - ret = -EKEYREJECTED; if (cert->blacklisted) - goto error_free_cert; + return -EKEYREJECTED; /* Propose a description */ sulen = strlen(cert->subject); @@ -207,10 +187,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) q = cert->raw_serial; } - ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); if (!desc) - goto error_free_cert; + return -ENOMEM; p = memcpy(desc, cert->subject, sulen); p += sulen; *p++ = ':'; @@ -220,9 +199,14 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) kids = 
kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL); if (!kids) - goto error_free_desc; + return -ENOMEM; kids->id[0] = cert->id; kids->id[1] = cert->skid; + kids->id[2] = asymmetric_key_generate_id(cert->raw_subject, + cert->raw_subject_size, + "", 0); + if (IS_ERR(kids->id[2])) + return PTR_ERR(kids->id[2]); /* We're pinning the module by being linked against it */ __module_get(public_key_subtype.owner); @@ -239,13 +223,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) cert->skid = NULL; cert->sig = NULL; desc = NULL; - ret = 0; - -error_free_desc: - kfree(desc); -error_free_cert: - x509_free_certificate(cert); - return ret; + kids = NULL; + return 0; } static struct asymmetric_key_parser x509_key_parser = { diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index f9cdc5e91664..9e4bb7fbde25 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -11,8 +11,8 @@ #include <linux/async_tx.h> #include <linux/gfp.h> -/** - * pq_scribble_page - space to hold throwaway P or Q buffer for +/* + * struct pq_scribble_page - space to hold throwaway P or Q buffer for * synchronous gen_syndrome */ static struct page *pq_scribble_page; @@ -28,7 +28,7 @@ static struct page *pq_scribble_page; #define MAX_DISKS 255 -/** +/* * do_async_gen_syndrome - asynchronously calculate P and/or Q */ static __async_inline struct dma_async_tx_descriptor * @@ -100,7 +100,7 @@ do_async_gen_syndrome(struct dma_chan *chan, return tx; } -/** +/* * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome */ static void @@ -119,7 +119,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, for (i = 0; i < disks; i++) { if (blocks[i] == NULL) { BUG_ON(i > disks - 3); /* P or Q can't be zero */ - srcs[i] = (void*)raid6_empty_zero_page; + srcs[i] = raid6_get_zero_page(); } else { srcs[i] = page_address(blocks[i]) + offsets[i]; @@ -281,7 +281,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si /** * async_syndrome_val - asynchronously validate a raid6 syndrome * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 - * @offset: common offset into each block (src and dest) to start transaction + * @offsets: common offset into each block (src and dest) to start transaction * @disks: number of blocks (including missing P or Q, see below) * @len: length of operation in bytes * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index 354b8cd5537f..539ea5b378dc 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -414,7 +414,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) if (blocks[i] == NULL) - ptrs[i] = (void *) raid6_empty_zero_page; + ptrs[i] = raid6_get_zero_page(); else ptrs[i] = page_address(blocks[i]) + offs[i]; @@ -497,7 +497,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) if (blocks[i] == NULL) - ptrs[i] = (void*)raid6_empty_zero_page; + ptrs[i] = raid6_get_zero_page(); else ptrs[i] = page_address(blocks[i]) + offs[i]; diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 9256934312d7..ad72057a5e0d 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -124,7 +124,7 @@ async_tx_channel_switch(struct 
dma_async_tx_descriptor *depend_tx, /** - * submit_disposition - flags for routing an incoming operation + * enum submit_disposition - flags for routing an incoming operation * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(async_trigger_callback); /** * async_tx_quiesce - ensure tx is complete and freeable upon return - * @tx - transaction to quiesce + * @tx: transaction to quiesce */ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) { diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index d8a91521144e..2c499654a36c 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -170,8 +170,8 @@ dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset, * * xor_blocks always uses the dest as a source so the * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in - * the calculation. The assumption with dma eninges is that they only - * use the destination buffer as a source when it is explicity specified + * the calculation. The assumption with dma engines is that they only + * use the destination buffer as a source when it is explicitly specified * in the source list. * * src_list note: if the dest is also a source it must be at index zero. @@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(async_xor_offs); * * xor_blocks always uses the dest as a source so the * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in - * the calculation. The assumption with dma eninges is that they only - * use the destination buffer as a source when it is explicity specified + * the calculation. The assumption with dma engines is that they only + * use the destination buffer as a source when it is explicitly specified * in the source list. * * src_list note: if the dest is also a source it must be at index zero. @@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset, } EXPORT_SYMBOL_GPL(async_xor_val_offs); -/** - * async_xor_val - attempt a xor parity check with a dma engine. - * @dest: destination page used if the xor is performed synchronously - * @src_list: array of source pages - * @offset: offset in pages to start transaction - * @src_cnt: number of source pages - * @len: length in bytes - * @result: 0 if sum == 0 else non-zero - * @submit: submission / completion modifiers - * - * honored flags: ASYNC_TX_ACK - * - * src_list note: if the dest is also a source it must be at index zero. - * The contents of this array will be overwritten if a scribble region - * is not specified. 
- */ -struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit) -{ - return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt, - len, result, submit); -} -EXPORT_SYMBOL_GPL(async_xor_val); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 66db82e5a3b1..d3fbee1e03e5 100644 --- a/crypto/async_tx/raid6test.c +++ b/crypto/async_tx/raid6test.c @@ -37,7 +37,7 @@ static void makedata(int disks) int i; for (i = 0; i < disks; i++) { - prandom_bytes(page_address(data[i]), PAGE_SIZE); + get_random_bytes(page_address(data[i]), PAGE_SIZE); dataptrs[i] = data[i]; dataoffs[i] = 0; } @@ -189,7 +189,7 @@ static int test(int disks, int *tests) } -static int raid6_test(void) +static int __init raid6_test(void) { int err = 0; int tests = 0; @@ -217,7 +217,7 @@ static int raid6_test(void) err += test(12, &tests); } - /* the 24 disk case is special for ioatdma as it is the boudary point + /* the 24 disk case is special for ioatdma as it is the boundary point * at which it needs to switch from 8-source ops to 16-source * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set) */ @@ -236,12 +236,12 @@ static int raid6_test(void) return 0; } -static void raid6_test_exit(void) +static void __exit raid6_test_exit(void) { } /* when compiled-in wait for drivers to load first (assumes dma drivers - * are also compliled-in) + * are also compiled-in) */ late_initcall(raid6_test); module_exit(raid6_test_exit); diff --git a/crypto/authenc.c b/crypto/authenc.c index 670bf1a01d00..ac679ce2cb95 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -9,7 +9,6 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> @@ -28,7 +27,6 @@ struct authenc_instance_ctx { struct crypto_authenc_ctx { struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; }; struct authenc_request_ctx { @@ -39,7 +37,7 @@ struct authenc_request_ctx { static void authenc_request_complete(struct aead_request *req, int err) { - if (err != -EINPROGRESS) + if (err != -EINPROGRESS && err != -EBUSY) aead_request_complete(req, err); } @@ -109,27 +107,42 @@ out: return err; } -static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) +static void authenc_geniv_ahash_finish(struct aead_request *req) { - struct aead_request *req = areq->data; struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct aead_instance *inst = aead_alg_instance(authenc); struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); - if (err) - goto out; - scatterwalk_map_and_copy(ahreq->result, req->dst, req->assoclen + req->cryptlen, crypto_aead_authsize(authenc), 1); +} -out: +static void authenc_geniv_ahash_done(void *data, int err) +{ + struct aead_request *req = data; + + if (!err) + authenc_geniv_ahash_finish(req); aead_request_complete(req, err); } -static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) +/* + * Used when the ahash request was invoked in the async callback context + * of the previous skcipher request. 
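Callers there mask CRYPTO_TFM_REQ_MAY_SLEEP off the request flags, since the callback may run in softirq context where sleeping is not allowed.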
Eat any EINPROGRESS notifications. + */ +static void authenc_geniv_ahash_done2(void *data, int err) +{ + struct aead_request *req = data; + + if (!err) + authenc_geniv_ahash_finish(req); + authenc_request_complete(req, err); +} + +static int crypto_authenc_genicv(struct aead_request *req, unsigned int mask) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct aead_instance *inst = aead_alg_instance(authenc); @@ -138,17 +151,16 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) struct crypto_ahash *auth = ctx->auth; struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + unsigned int flags = aead_request_flags(req) & ~mask; u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - ahash_request_set_tfm(ahreq, auth); ahash_request_set_crypt(ahreq, req->dst, hash, req->assoclen + req->cryptlen); ahash_request_set_callback(ahreq, flags, - authenc_geniv_ahash_done, req); + mask ? authenc_geniv_ahash_done2 : + authenc_geniv_ahash_done, req); err = crypto_ahash_digest(ahreq); if (err) @@ -160,35 +172,18 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) return 0; } -static void crypto_authenc_encrypt_done(struct crypto_async_request *req, - int err) +static void crypto_authenc_encrypt_done(void *data, int err) { - struct aead_request *areq = req->data; - - if (err) - goto out; + struct aead_request *areq = data; - err = crypto_authenc_genicv(areq, 0); - -out: + if (err) { + aead_request_complete(areq, err); + return; + } + err = crypto_authenc_genicv(areq, CRYPTO_TFM_REQ_MAY_SLEEP); authenc_request_complete(areq, err); } -static int crypto_authenc_copy_assoc(struct aead_request *req) -{ - struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - - skcipher_request_set_sync_tfm(skreq, ctx->null); - skcipher_request_set_callback(skreq, aead_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, - NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int crypto_authenc_encrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); @@ -207,10 +202,7 @@ static int crypto_authenc_encrypt(struct aead_request *req) dst = src; if (req->src != req->dst) { - err = crypto_authenc_copy_assoc(req); - if (err) - return err; - + memcpy_sglist(req->dst, req->src, req->assoclen); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); } @@ -223,11 +215,18 @@ static int crypto_authenc_encrypt(struct aead_request *req) if (err) return err; - return crypto_authenc_genicv(req, aead_request_flags(req)); + return crypto_authenc_genicv(req, 0); +} + +static void authenc_decrypt_tail_done(void *data, int err) +{ + struct aead_request *req = data; + + authenc_request_complete(req, err); } static int crypto_authenc_decrypt_tail(struct aead_request *req, - unsigned int flags) + unsigned int mask) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct aead_instance *inst = aead_alg_instance(authenc); @@ -238,6 +237,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, struct skcipher_request *skreq = (void *)(areq_ctx->tail + ictx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc); + unsigned int flags = aead_request_flags(req) & ~mask; u8 *ihash = 
ahreq->result + authsize; struct scatterlist *src, *dst; @@ -253,25 +253,25 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); skcipher_request_set_tfm(skreq, ctx->enc); - skcipher_request_set_callback(skreq, aead_request_flags(req), - req->base.complete, req->base.data); + skcipher_request_set_callback(skreq, flags, + mask ? authenc_decrypt_tail_done : + req->base.complete, + mask ? req : req->base.data); skcipher_request_set_crypt(skreq, src, dst, req->cryptlen - authsize, req->iv); return crypto_skcipher_decrypt(skreq); } -static void authenc_verify_ahash_done(struct crypto_async_request *areq, - int err) +static void authenc_verify_ahash_done(void *data, int err) { - struct aead_request *req = areq->data; - - if (err) - goto out; - - err = crypto_authenc_decrypt_tail(req, 0); + struct aead_request *req = data; -out: + if (err) { + aead_request_complete(req, err); + return; + } + err = crypto_authenc_decrypt_tail(req, CRYPTO_TFM_REQ_MAY_SLEEP); authenc_request_complete(req, err); } @@ -288,9 +288,6 @@ static int crypto_authenc_decrypt(struct aead_request *req) u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - ahash_request_set_tfm(ahreq, auth); ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen - authsize); @@ -301,7 +298,7 @@ static int crypto_authenc_decrypt(struct aead_request *req) if (err) return err; - return crypto_authenc_decrypt_tail(req, aead_request_flags(req)); + return crypto_authenc_decrypt_tail(req, 0); } static int crypto_authenc_init_tfm(struct crypto_aead *tfm) @@ -311,7 +308,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); @@ -323,14 +319,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_skcipher; - ctx->auth = auth; ctx->enc = enc; - ctx->null = null; crypto_aead_set_reqsize( tfm, @@ -344,8 +334,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) return 0; -err_free_skcipher: - crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -357,7 +345,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher(); } static void crypto_authenc_free(struct aead_instance *inst) @@ -375,9 +362,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; + struct skcipher_alg_common *enc; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct skcipher_alg *enc; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); @@ -400,10 +387,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; - enc = crypto_spawn_skcipher_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg_common(&ctx->enc); - ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, - auth_base->cra_alignmask + 1); + ctx->reqoff = 2 * auth->digestsize; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, 
CRYPTO_MAX_ALG_NAME, @@ -420,12 +406,11 @@ static int crypto_authenc_create(struct crypto_template *tmpl, inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; - inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->base.cra_alignmask; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_init_tfm; @@ -461,7 +446,7 @@ static void __exit crypto_authenc_module_exit(void) crypto_unregister_template(&crypto_authenc_tmpl); } -subsys_initcall(crypto_authenc_module_init); +module_init(crypto_authenc_module_init); module_exit(crypto_authenc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/authencesn.c b/crypto/authencesn.c index b60e61b1904c..d1bf0fda3f2e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -12,7 +12,6 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> @@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; }; struct authenc_esn_request_ctx { @@ -87,11 +85,8 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req, unsigned int flags) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); - struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); - struct crypto_ahash *auth = ctx->auth; - u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *hash = areq_ctx->tail; unsigned int authsize = crypto_aead_authsize(authenc_esn); unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; @@ -107,10 +102,9 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req, return 0; } -static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq, - int err) +static void authenc_esn_geniv_ahash_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; err = err ?: crypto_authenc_esn_genicv_tail(req, 0); aead_request_complete(req, err); @@ -123,8 +117,7 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; - u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *hash = areq_ctx->tail; struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc_esn); unsigned int assoclen = req->assoclen; @@ -153,10 +146,9 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, } -static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, - int err) +static void crypto_authenc_esn_encrypt_done(void *data, int err) { - struct aead_request *areq = req->data; + struct aead_request *areq = data; if (!err) err = crypto_authenc_esn_genicv(areq, 0); @@ -164,20 +156,6 @@ static void 
crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, authenc_esn_request_complete(areq, err); } -static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len) -{ - struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); - struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - - skcipher_request_set_sync_tfm(skreq, ctx->null); - skcipher_request_set_callback(skreq, aead_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int crypto_authenc_esn_encrypt(struct aead_request *req) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); @@ -196,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req) dst = src; if (req->src != req->dst) { - err = crypto_authenc_esn_copy(req, assoclen); - if (err) - return err; - + memcpy_sglist(req->dst, req->src, assoclen); sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); } @@ -226,8 +201,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, struct skcipher_request *skreq = (void *)(areq_ctx->tail + ctx->reqoff); struct crypto_ahash *auth = ctx->auth; - u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *ohash = areq_ctx->tail; unsigned int cryptlen = req->cryptlen - authsize; unsigned int assoclen = req->assoclen; struct scatterlist *dst = req->dst; @@ -258,10 +232,9 @@ decrypt: return crypto_skcipher_decrypt(skreq); } -static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, - int err) +static void authenc_esn_verify_ahash_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); authenc_esn_request_complete(req, err); @@ -275,8 +248,7 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req) struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc_esn); struct crypto_ahash *auth = ctx->auth; - u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *ohash = areq_ctx->tail; unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; u8 *ihash = ohash + crypto_ahash_digestsize(auth); @@ -286,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req) cryptlen -= authsize; - if (req->src != dst) { - err = crypto_authenc_esn_copy(req, assoclen + cryptlen); - if (err) - return err; - } + if (req->src != dst) + memcpy_sglist(dst, req->src, assoclen + cryptlen); scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen, authsize, 0); @@ -326,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); @@ -338,17 +306,10 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_skcipher; - ctx->auth = auth; ctx->enc = enc; - ctx->null = null; - ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth), - crypto_ahash_alignmask(auth) + 1); + ctx->reqoff = 2 * crypto_ahash_digestsize(auth); crypto_aead_set_reqsize( tfm, @@ -362,8 
+323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) return 0; -err_free_skcipher: - crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -375,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher(); } static void crypto_authenc_esn_free(struct aead_instance *inst) @@ -393,9 +351,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; + struct skcipher_alg_common *enc; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct skcipher_alg *enc; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); @@ -418,7 +376,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; - enc = crypto_spawn_skcipher_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg_common(&ctx->enc); err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -434,12 +392,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; - inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->base.cra_alignmask; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_esn_init_tfm; @@ -476,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void) crypto_unregister_template(&crypto_authenc_esn_tmpl); } -subsys_initcall(crypto_authenc_esn_module_init); +module_init(crypto_authenc_esn_module_init); module_exit(crypto_authenc_esn_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/blake2b.c b/crypto/blake2b.c new file mode 100644 index 000000000000..67a6dae43a54 --- /dev/null +++ b/crypto/blake2b.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API support for BLAKE2b + * + * Copyright 2025 Google LLC + */ +#include <crypto/blake2b.h> +#include <crypto/internal/hash.h> +#include <linux/kernel.h> +#include <linux/module.h> + +struct blake2b_tfm_ctx { + unsigned int keylen; + u8 key[BLAKE2B_KEY_SIZE]; +}; + +static int crypto_blake2b_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen > BLAKE2B_KEY_SIZE) + return -EINVAL; + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + return 0; +} + +#define BLAKE2B_CTX(desc) ((struct blake2b_ctx *)shash_desc_ctx(desc)) + +static int crypto_blake2b_init(struct shash_desc *desc) +{ + const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + unsigned int digestsize = crypto_shash_digestsize(desc->tfm); + + blake2b_init_key(BLAKE2B_CTX(desc), digestsize, + tctx->key, tctx->keylen); + return 0; +} + +static int crypto_blake2b_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + blake2b_update(BLAKE2B_CTX(desc), data, len); + return 0; +} + +static int crypto_blake2b_final(struct shash_desc *desc, u8 *out) +{ + blake2b_final(BLAKE2B_CTX(desc), out); + 
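/*
 * Editor's note: the shash callbacks above are thin wrappers around the
 * BLAKE2b library. A minimal sketch of the same incremental flow, assuming
 * only the <crypto/blake2b.h> interface this file already includes:
 *
 *	struct blake2b_ctx ctx;
 *
 *	blake2b_init_key(&ctx, BLAKE2B_256_HASH_SIZE, key, keylen);
 *	blake2b_update(&ctx, data, len);
 *	blake2b_final(&ctx, digest);
 *
 * blake2b_init_key() takes the digest size, so a single state type serves
 * all four blake2b-{160,256,384,512} variants registered below.
 */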
return 0; +} + +static int crypto_blake2b_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + unsigned int digestsize = crypto_shash_digestsize(desc->tfm); + + blake2b(tctx->key, tctx->keylen, data, len, out, digestsize); + return 0; +} + +#define BLAKE2B_ALG(name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = name "-lib", \ + .base.cra_priority = 300, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2b_setkey, \ + .init = crypto_blake2b_init, \ + .update = crypto_blake2b_update, \ + .final = crypto_blake2b_final, \ + .digest = crypto_blake2b_digest, \ + .descsize = sizeof(struct blake2b_ctx), \ + } + +static struct shash_alg algs[] = { + BLAKE2B_ALG("blake2b-160", BLAKE2B_160_HASH_SIZE), + BLAKE2B_ALG("blake2b-256", BLAKE2B_256_HASH_SIZE), + BLAKE2B_ALG("blake2b-384", BLAKE2B_384_HASH_SIZE), + BLAKE2B_ALG("blake2b-512", BLAKE2B_512_HASH_SIZE), +}; + +static int __init crypto_blake2b_mod_init(void) +{ + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} +module_init(crypto_blake2b_mod_init); + +static void __exit crypto_blake2b_mod_exit(void) +{ + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); +} +module_exit(crypto_blake2b_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API support for BLAKE2b"); + +MODULE_ALIAS_CRYPTO("blake2b-160"); +MODULE_ALIAS_CRYPTO("blake2b-160-lib"); +MODULE_ALIAS_CRYPTO("blake2b-256"); +MODULE_ALIAS_CRYPTO("blake2b-256-lib"); +MODULE_ALIAS_CRYPTO("blake2b-384"); +MODULE_ALIAS_CRYPTO("blake2b-384-lib"); +MODULE_ALIAS_CRYPTO("blake2b-512"); +MODULE_ALIAS_CRYPTO("blake2b-512-lib"); diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c deleted file mode 100644 index 6704c0355889..000000000000 --- a/crypto/blake2b_generic.c +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0) -/* - * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b - * reference implementation, but it has been heavily modified for use in the - * kernel. The reference implementation was: - * - * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under - * the terms of the CC0, the OpenSSL Licence, or the Apache Public License - * 2.0, at your option. The terms of these licenses can be found at: - * - * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - * - OpenSSL license : https://www.openssl.org/source/license.html - * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 - * - * More information about BLAKE2 can be found at https://blake2.net. 
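 *
 * Editor's note: this generic shash implementation is superseded by the
 * library-backed crypto/blake2b.c added above, so the whole file goes away.
 * One detail below deserves a worked example: the 128-bit message counter.
 * blake2b_increment_counter() adds the block length into t[0] and derives
 * the carry into t[1] from the wrap-around,
 *
 *	S->t[0] += inc;
 *	S->t[1] += (S->t[0] < inc);
 *
 * because an unsigned overflow leaves t[0] strictly smaller than the addend.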
- */ - -#include <asm/unaligned.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/bitops.h> -#include <crypto/internal/blake2b.h> -#include <crypto/internal/hash.h> - -static const u8 blake2b_sigma[12][16] = { - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, - { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, - { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, - { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, - { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, - { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, - { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, - { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, - { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, - { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, - { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } -}; - -static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc) -{ - S->t[0] += inc; - S->t[1] += (S->t[0] < inc); -} - -#define G(r,i,a,b,c,d) \ - do { \ - a = a + b + m[blake2b_sigma[r][2*i+0]]; \ - d = ror64(d ^ a, 32); \ - c = c + d; \ - b = ror64(b ^ c, 24); \ - a = a + b + m[blake2b_sigma[r][2*i+1]]; \ - d = ror64(d ^ a, 16); \ - c = c + d; \ - b = ror64(b ^ c, 63); \ - } while (0) - -#define ROUND(r) \ - do { \ - G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \ - G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \ - G(r,2,v[ 2],v[ 6],v[10],v[14]); \ - G(r,3,v[ 3],v[ 7],v[11],v[15]); \ - G(r,4,v[ 0],v[ 5],v[10],v[15]); \ - G(r,5,v[ 1],v[ 6],v[11],v[12]); \ - G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \ - G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ - } while (0) - -static void blake2b_compress_one_generic(struct blake2b_state *S, - const u8 block[BLAKE2B_BLOCK_SIZE]) -{ - u64 m[16]; - u64 v[16]; - size_t i; - - for (i = 0; i < 16; ++i) - m[i] = get_unaligned_le64(block + i * sizeof(m[i])); - - for (i = 0; i < 8; ++i) - v[i] = S->h[i]; - - v[ 8] = BLAKE2B_IV0; - v[ 9] = BLAKE2B_IV1; - v[10] = BLAKE2B_IV2; - v[11] = BLAKE2B_IV3; - v[12] = BLAKE2B_IV4 ^ S->t[0]; - v[13] = BLAKE2B_IV5 ^ S->t[1]; - v[14] = BLAKE2B_IV6 ^ S->f[0]; - v[15] = BLAKE2B_IV7 ^ S->f[1]; - - ROUND(0); - ROUND(1); - ROUND(2); - ROUND(3); - ROUND(4); - ROUND(5); - ROUND(6); - ROUND(7); - ROUND(8); - ROUND(9); - ROUND(10); - ROUND(11); -#ifdef CONFIG_CC_IS_CLANG -#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */ -#endif - for (i = 0; i < 8; ++i) - S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; -} - -#undef G -#undef ROUND - -void blake2b_compress_generic(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc) -{ - do { - blake2b_increment_counter(state, inc); - blake2b_compress_one_generic(state, block); - block += BLAKE2B_BLOCK_SIZE; - } while (--nblocks); -} -EXPORT_SYMBOL(blake2b_compress_generic); - -static int crypto_blake2b_update_generic(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); -} - -static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2b_final(desc, out, blake2b_compress_generic); -} - -#define BLAKE2B_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 100, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = 
digest_size, \ - .setkey = crypto_blake2b_setkey, \ - .init = crypto_blake2b_init, \ - .update = crypto_blake2b_update_generic, \ - .final = crypto_blake2b_final_generic, \ - .descsize = sizeof(struct blake2b_state), \ - } - -static struct shash_alg blake2b_algs[] = { - BLAKE2B_ALG("blake2b-160", "blake2b-160-generic", - BLAKE2B_160_HASH_SIZE), - BLAKE2B_ALG("blake2b-256", "blake2b-256-generic", - BLAKE2B_256_HASH_SIZE), - BLAKE2B_ALG("blake2b-384", "blake2b-384-generic", - BLAKE2B_384_HASH_SIZE), - BLAKE2B_ALG("blake2b-512", "blake2b-512-generic", - BLAKE2B_512_HASH_SIZE), -}; - -static int __init blake2b_mod_init(void) -{ - return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); -} - -static void __exit blake2b_mod_fini(void) -{ - crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); -} - -subsys_initcall(blake2b_mod_init); -module_exit(blake2b_mod_fini); - -MODULE_AUTHOR("David Sterba <kdave@kernel.org>"); -MODULE_DESCRIPTION("BLAKE2b generic implementation"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("blake2b-160"); -MODULE_ALIAS_CRYPTO("blake2b-160-generic"); -MODULE_ALIAS_CRYPTO("blake2b-256"); -MODULE_ALIAS_CRYPTO("blake2b-256-generic"); -MODULE_ALIAS_CRYPTO("blake2b-384"); -MODULE_ALIAS_CRYPTO("blake2b-384-generic"); -MODULE_ALIAS_CRYPTO("blake2b-512"); -MODULE_ALIAS_CRYPTO("blake2b-512-generic"); diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c deleted file mode 100644 index 72fe480f9bd6..000000000000 --- a/crypto/blake2s_generic.c +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * shash interface to the generic implementation of BLAKE2s - * - * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - */ - -#include <crypto/internal/blake2s.h> -#include <crypto/internal/hash.h> - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/module.h> - -static int crypto_blake2s_update_generic(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic); -} - -static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2s_final(desc, out, blake2s_compress_generic); -} - -#define BLAKE2S_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 100, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2s_setkey, \ - .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update_generic, \ - .final = crypto_blake2s_final_generic, \ - .descsize = sizeof(struct blake2s_state), \ - } - -static struct shash_alg blake2s_algs[] = { - BLAKE2S_ALG("blake2s-128", "blake2s-128-generic", - BLAKE2S_128_HASH_SIZE), - BLAKE2S_ALG("blake2s-160", "blake2s-160-generic", - BLAKE2S_160_HASH_SIZE), - BLAKE2S_ALG("blake2s-224", "blake2s-224-generic", - BLAKE2S_224_HASH_SIZE), - BLAKE2S_ALG("blake2s-256", "blake2s-256-generic", - BLAKE2S_256_HASH_SIZE), -}; - -static int __init blake2s_mod_init(void) -{ - return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -static void __exit blake2s_mod_exit(void) -{ - crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -subsys_initcall(blake2s_mod_init); -module_exit(blake2s_mod_exit); - -MODULE_ALIAS_CRYPTO("blake2s-128"); 
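/*
 * Editor's note: the BLAKE2s shash glue is likewise deleted, and unlike
 * BLAKE2b it gets no "-lib" replacement registered with the crypto API;
 * remaining in-kernel users are expected to call the BLAKE2s library
 * directly. A minimal sketch, assuming the existing <crypto/blake2s.h>
 * library interface:
 *
 *	struct blake2s_state state;
 *
 *	blake2s_init_key(&state, BLAKE2S_256_HASH_SIZE, key, keylen);
 *	blake2s_update(&state, data, len);
 *	blake2s_final(&state, digest);
 */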
-MODULE_ALIAS_CRYPTO("blake2s-128-generic"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-generic"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-generic"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-generic"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/blowfish_common.c b/crypto/blowfish_common.c index 1c072012baff..c0208ce269a3 100644 --- a/crypto/blowfish_common.c +++ b/crypto/blowfish_common.c @@ -14,11 +14,12 @@ * Copyright (c) Kyle McMartin <kyle@debian.org> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> */ + +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> -#include <linux/crypto.h> #include <linux/types.h> #include <crypto/blowfish.h> diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index 003b52c6880e..f3c5f9b09850 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -11,11 +11,12 @@ * Copyright (c) Kyle McMartin <kyle@debian.org> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> */ + +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/unaligned.h> -#include <linux/crypto.h> +#include <linux/unaligned.h> #include <linux/types.h> #include <crypto/blowfish.h> @@ -123,7 +124,7 @@ static void __exit blowfish_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(blowfish_mod_init); +module_init(blowfish_mod_init); module_exit(blowfish_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c new file mode 100644 index 000000000000..a88798d3e8c8 --- /dev/null +++ b/crypto/bpf_crypto_skcipher.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Meta, Inc */ +#include <linux/types.h> +#include <linux/module.h> +#include <linux/bpf_crypto.h> +#include <crypto/skcipher.h> + +static void *bpf_crypto_lskcipher_alloc_tfm(const char *algo) +{ + return crypto_alloc_lskcipher(algo, 0, 0); +} + +static void bpf_crypto_lskcipher_free_tfm(void *tfm) +{ + crypto_free_lskcipher(tfm); +} + +static int bpf_crypto_lskcipher_has_algo(const char *algo) +{ + return crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_LSKCIPHER, CRYPTO_ALG_TYPE_MASK); +} + +static int bpf_crypto_lskcipher_setkey(void *tfm, const u8 *key, unsigned int keylen) +{ + return crypto_lskcipher_setkey(tfm, key, keylen); +} + +static u32 bpf_crypto_lskcipher_get_flags(void *tfm) +{ + return crypto_lskcipher_get_flags(tfm); +} + +static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm) +{ + return crypto_lskcipher_ivsize(tfm); +} + +static unsigned int bpf_crypto_lskcipher_statesize(void *tfm) +{ + return crypto_lskcipher_statesize(tfm); +} + +static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst, + unsigned int len, u8 *siv) +{ + return crypto_lskcipher_encrypt(tfm, src, dst, len, siv); +} + +static int bpf_crypto_lskcipher_decrypt(void *tfm, const u8 *src, u8 *dst, + unsigned int len, u8 *siv) +{ + return crypto_lskcipher_decrypt(tfm, src, dst, len, siv); +} + +static const struct bpf_crypto_type bpf_crypto_lskcipher_type = { + .alloc_tfm = bpf_crypto_lskcipher_alloc_tfm, + .free_tfm = bpf_crypto_lskcipher_free_tfm, + .has_algo = bpf_crypto_lskcipher_has_algo, + .setkey = bpf_crypto_lskcipher_setkey, + .encrypt = bpf_crypto_lskcipher_encrypt, + .decrypt = bpf_crypto_lskcipher_decrypt, + .ivsize = bpf_crypto_lskcipher_ivsize, + .statesize = 
bpf_crypto_lskcipher_statesize, + .get_flags = bpf_crypto_lskcipher_get_flags, + .owner = THIS_MODULE, + .name = "skcipher", +}; + +static int __init bpf_crypto_skcipher_init(void) +{ + return bpf_crypto_register_type(&bpf_crypto_lskcipher_type); +} + +static void __exit bpf_crypto_skcipher_exit(void) +{ + int err = bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type); + WARN_ON_ONCE(err); +} + +module_init(bpf_crypto_skcipher_init); +module_exit(bpf_crypto_skcipher_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Symmetric key cipher support for BPF"); diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index fd1a88af9e77..ee4336a04b93 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -9,13 +9,13 @@ * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ -#include <linux/crypto.h> +#include <crypto/algapi.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/bitops.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> static const u32 camellia_sp1110[256] = { 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00, @@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void) crypto_unregister_alg(&camellia_alg); } -subsys_initcall(camellia_init); +module_init(camellia_init); module_exit(camellia_fini); MODULE_DESCRIPTION("Camellia Cipher Algorithm"); diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 0257c14cefc2..f68330793e0c 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -13,9 +13,9 @@ */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> +#include <crypto/algapi.h> #include <linux/init.h> -#include <linux/crypto.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> @@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(cast5_mod_init); +module_init(cast5_mod_init); module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 75346380aa0b..4c08c42646f0 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -10,9 +10,9 @@ */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> +#include <crypto/algapi.h> #include <linux/init.h> -#include <linux/crypto.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> @@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(cast6_mod_init); +module_init(cast6_mod_init); module_exit(cast6_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/cast_common.c b/crypto/cast_common.c index 9b2f60fd4cef..fec1f6609a40 100644 --- a/crypto/cast_common.c +++ b/crypto/cast_common.c @@ -282,4 +282,5 @@ __visible const u32 cast_s4[256] = { }; EXPORT_SYMBOL_GPL(cast_s4); +MODULE_DESCRIPTION("Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6)"); MODULE_LICENSE("GPL"); diff --git a/crypto/cbc.c b/crypto/cbc.c index 6c03e96b945f..ed3df6246765 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -5,8 +5,6 @@ * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au> */ -#include <crypto/algapi.h> -#include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> @@ -14,99 +12,72 @@ #include <linux/log2.h> #include <linux/module.h> -static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm, + const u8 
*src, u8 *dst, unsigned nbytes, + u8 *iv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_encrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); - do { + for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) { crypto_xor(iv, src, bsize); - fn(tfm, dst, iv); + crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL); memcpy(iv, dst, bsize); - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); + } return nbytes; } -static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm, + u8 *src, unsigned nbytes, u8 *oiv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_encrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); + u8 *iv = oiv; + + if (nbytes < bsize) + goto out; do { crypto_xor(src, iv, bsize); - fn(tfm, src, src); + crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL); iv = src; src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); + memcpy(oiv, iv, bsize); +out: return nbytes; } -static int crypto_cbc_encrypt(struct skcipher_request *req) +static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, u32 flags) { - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - int err; + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL; + struct crypto_lskcipher *cipher = *ctx; + int rem; - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cbc_encrypt_inplace(&walk, skcipher); - else - err = crypto_cbc_encrypt_segment(&walk, skcipher); - err = skcipher_walk_done(&walk, err); - } + if (src == dst) + rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv); + else + rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv); - return err; + return rem && final ? 
-EINVAL : rem; } -static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, unsigned nbytes, + u8 *oiv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_decrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); + const u8 *iv = oiv; + + if (nbytes < bsize) + goto out; do { - fn(tfm, dst, src); + crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL); crypto_xor(dst, iv, bsize); iv = src; @@ -114,83 +85,76 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, dst += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); + memcpy(oiv, iv, bsize); +out: return nbytes; } -static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm, + u8 *src, unsigned nbytes, u8 *iv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); u8 last_iv[MAX_CIPHER_BLOCKSIZE]; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_decrypt; + if (nbytes < bsize) + goto out; /* Start of the last block. */ src += nbytes - (nbytes & (bsize - 1)) - bsize; memcpy(last_iv, src, bsize); for (;;) { - fn(tfm, src, src); + crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL); if ((nbytes -= bsize) < bsize) break; crypto_xor(src, src - bsize, bsize); src -= bsize; } - crypto_xor(src, walk->iv, bsize); - memcpy(walk->iv, last_iv, bsize); + crypto_xor(src, iv, bsize); + memcpy(iv, last_iv, bsize); +out: return nbytes; } -static int crypto_cbc_decrypt(struct skcipher_request *req) +static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, u32 flags) { - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - int err; + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL; + struct crypto_lskcipher *cipher = *ctx; + int rem; - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cbc_decrypt_inplace(&walk, skcipher); - else - err = crypto_cbc_decrypt_segment(&walk, skcipher); - err = skcipher_walk_done(&walk, err); - } + if (src == dst) + rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv); + else + rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv); - return err; + return rem && final ? 
-EINVAL : rem; } static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct skcipher_instance *inst; - struct crypto_alg *alg; + struct lskcipher_instance *inst; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb); + inst = lskcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); - alg = skcipher_ialg_simple(inst); - err = -EINVAL; - if (!is_power_of_2(alg->cra_blocksize)) + if (!is_power_of_2(inst->alg.co.base.cra_blocksize)) + goto out_free_inst; + + if (inst->alg.co.statesize) goto out_free_inst; inst->alg.encrypt = crypto_cbc_encrypt; inst->alg.decrypt = crypto_cbc_decrypt; - err = skcipher_register_instance(tmpl, inst); + err = lskcipher_register_instance(tmpl, inst); if (err) { out_free_inst: inst->free(inst); @@ -215,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void) crypto_unregister_template(&crypto_cbc_tmpl); } -subsys_initcall(crypto_cbc_module_init); +module_init(crypto_cbc_module_init); module_exit(crypto_cbc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ccm.c b/crypto/ccm.c index 6b815ece51c6..2ae929ffdef8 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -10,11 +10,12 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string.h> struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; @@ -54,10 +55,6 @@ struct cbcmac_tfm_ctx { struct crypto_cipher *child; }; -struct cbcmac_desc_ctx { - unsigned int len; -}; - static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( struct aead_request *req) { @@ -218,15 +215,15 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, cryptlen += ilen; } - ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen); + ahash_request_set_crypt(ahreq, plain, odata, cryptlen); err = crypto_ahash_finup(ahreq); out: return err; } -static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) +static void crypto_ccm_encrypt_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); u8 *odata = pctx->odata; @@ -320,10 +317,9 @@ static int crypto_ccm_encrypt(struct aead_request *req) return err; } -static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, - int err) +static void crypto_ccm_decrypt_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(aead); @@ -448,10 +444,10 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { + struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; - struct skcipher_alg *ctr; struct hash_alg_common *mac; int err; @@ -479,13 +475,12 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ctr_name, 0, mask); if (err) goto err_free_inst; - ctr = crypto_spawn_skcipher_alg(&ictx->ctr); + ctr = crypto_spawn_skcipher_alg_common(&ictx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. 
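 * (Editor's note: "16-byte blocks" refers to the 16-byte counter block,
 * i.e. the ivsize; the cra_blocksize check below requires 1, because
 * ctr(...) presents itself as a stream cipher. A hedged usage sketch of the
 * assembled template, with hypothetical key/buffer names:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *
 * such an instance drives "ctr(aes)" for confidentiality and "cbcmac(aes)"
 * for authentication, which is how the ccm(aes) shorthand resolves.)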
*/ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || - crypto_skcipher_alg_ivsize(ctr) != 16 || - ctr->base.cra_blocksize != 1) + ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; /* ctr and cbcmac must use the same underlying block cipher. */ @@ -505,10 +500,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = mac->base.cra_alignmask | - ctr->base.cra_alignmask; + inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.ivsize = 16; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); inst->alg.init = crypto_ccm_init_tfm; @@ -785,13 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) { - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_digestsize(pdesc->tfm); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs; + u8 *dg = shash_desc_ctx(pdesc); - ctx->len = 0; memset(dg, 0, bs); - return 0; } @@ -800,40 +791,33 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; - - while (len > 0) { - unsigned int l = min(len, bs - ctx->len); - - crypto_xor(dg + ctx->len, p, l); - ctx->len +=l; - len -= l; - p += l; - - if (ctx->len == bs) { - crypto_cipher_encrypt_one(tfm, dg, dg); - ctx->len = 0; - } - } + u8 *dg = shash_desc_ctx(pdesc); - return 0; + do { + crypto_xor(dg, p, bs); + crypto_cipher_encrypt_one(tfm, dg, dg); + p += bs; + len -= bs; + } while (len >= bs); + return len; } -static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; - - if (ctx->len) - crypto_cipher_encrypt_one(tfm, dg, dg); + u8 *dg = shash_desc_ctx(pdesc); + if (len) { + crypto_xor(dg, src, len); + crypto_cipher_encrypt_one(tfm, out, dg); + return 0; + } memcpy(out, dg, bs); return 0; } @@ -888,20 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = 1; + inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx), - alg->cra_alignmask + 1) + - alg->cra_blocksize; + inst->alg.descsize = alg->cra_blocksize; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY; inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx); inst->alg.base.cra_init = cbcmac_init_tfm; inst->alg.base.cra_exit = cbcmac_exit_tfm; inst->alg.init = crypto_cbcmac_digest_init; inst->alg.update = crypto_cbcmac_digest_update; - 
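/*
 * Editor's note: with CRYPTO_AHASH_ALG_BLOCK_ONLY set above, partial-block
 * buffering moves into the shash core: update() is assumed to be invoked
 * with at least one full block, consumes whole blocks only, and returns the
 * leftover byte count for the core to carry into finup(). That is why the
 * descriptor shrinks to a single digest-sized block and per-block CBC-MAC
 * reduces to
 *
 *	crypto_xor(dg, p, bs);
 *	crypto_cipher_encrypt_one(tfm, dg, dg);
 *
 * as crypto_cbcmac_digest_update() above now implements it.
 */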
inst->alg.final = crypto_cbcmac_digest_final; + inst->alg.finup = crypto_cbcmac_digest_finup; inst->alg.setkey = crypto_cbcmac_digest_setkey; inst->free = shash_free_singlespawn_instance; @@ -946,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void) ARRAY_SIZE(crypto_ccm_tmpls)); } -subsys_initcall(crypto_ccm_module_init); +module_init(crypto_ccm_module_init); module_exit(crypto_ccm_module_exit); MODULE_LICENSE("GPL"); @@ -955,4 +938,4 @@ MODULE_ALIAS_CRYPTO("ccm_base"); MODULE_ALIAS_CRYPTO("rfc4309"); MODULE_ALIAS_CRYPTO("ccm"); MODULE_ALIAS_CRYPTO("cbcmac"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/cfb.c b/crypto/cfb.c deleted file mode 100644 index 0d664dfb47bc..000000000000 --- a/crypto/cfb.c +++ /dev/null @@ -1,254 +0,0 @@ -//SPDX-License-Identifier: GPL-2.0 -/* - * CFB: Cipher FeedBack mode - * - * Copyright (c) 2018 James.Bottomley@HansenPartnership.com - * - * CFB is a stream cipher mode which is layered on to a block - * encryption scheme. It works very much like a one time pad where - * the pad is generated initially from the encrypted IV and then - * subsequently from the encrypted previous block of ciphertext. The - * pad is XOR'd into the plain text to get the final ciphertext. - * - * The scheme of CFB is best described by wikipedia: - * - * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB - * - * Note that since the pad for both encryption and decryption is - * generated by an encryption operation, CFB never uses the block - * decryption function. - */ - -#include <crypto/algapi.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/skcipher.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/string.h> - -static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm) -{ - return crypto_cipher_blocksize(skcipher_cipher_simple(tfm)); -} - -static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm, - const u8 *src, u8 *dst) -{ - crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src); -} - -/* final encrypt and decrypt is the same */ -static void crypto_cfb_final(struct skcipher_walk *walk, - struct crypto_skcipher *tfm) -{ - const unsigned long alignmask = crypto_skcipher_alignmask(tfm); - u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; - u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1); - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - unsigned int nbytes = walk->nbytes; - - crypto_cfb_encrypt_one(tfm, iv, stream); - crypto_xor_cpy(dst, stream, src, nbytes); -} - -static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *tfm) -{ - const unsigned int bsize = crypto_cfb_bsize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - - do { - crypto_cfb_encrypt_one(tfm, iv, dst); - crypto_xor(dst, src, bsize); - iv = dst; - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - memcpy(walk->iv, iv, bsize); - - return nbytes; -} - -static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *tfm) -{ - const unsigned int bsize = crypto_cfb_bsize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; - u8 tmp[MAX_CIPHER_BLOCKSIZE]; - - do { - crypto_cfb_encrypt_one(tfm, iv, tmp); - crypto_xor(src, tmp, bsize); - iv = src; - - src += bsize; - } while ((nbytes -= bsize) 
>= bsize); - - memcpy(walk->iv, iv, bsize); - - return nbytes; -} - -static int crypto_cfb_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - unsigned int bsize = crypto_cfb_bsize(tfm); - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes >= bsize) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cfb_encrypt_inplace(&walk, tfm); - else - err = crypto_cfb_encrypt_segment(&walk, tfm); - err = skcipher_walk_done(&walk, err); - } - - if (walk.nbytes) { - crypto_cfb_final(&walk, tfm); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - -static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *tfm) -{ - const unsigned int bsize = crypto_cfb_bsize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - - do { - crypto_cfb_encrypt_one(tfm, iv, dst); - crypto_xor(dst, src, bsize); - iv = src; - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - memcpy(walk->iv, iv, bsize); - - return nbytes; -} - -static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *tfm) -{ - const unsigned int bsize = crypto_cfb_bsize(tfm); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 * const iv = walk->iv; - u8 tmp[MAX_CIPHER_BLOCKSIZE]; - - do { - crypto_cfb_encrypt_one(tfm, iv, tmp); - memcpy(iv, src, bsize); - crypto_xor(src, tmp, bsize); - src += bsize; - } while ((nbytes -= bsize) >= bsize); - - return nbytes; -} - -static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk, - struct crypto_skcipher *tfm) -{ - if (walk->src.virt.addr == walk->dst.virt.addr) - return crypto_cfb_decrypt_inplace(walk, tfm); - else - return crypto_cfb_decrypt_segment(walk, tfm); -} - -static int crypto_cfb_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - const unsigned int bsize = crypto_cfb_bsize(tfm); - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes >= bsize) { - err = crypto_cfb_decrypt_blocks(&walk, tfm); - err = skcipher_walk_done(&walk, err); - } - - if (walk.nbytes) { - crypto_cfb_final(&walk, tfm); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - -static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct skcipher_instance *inst; - struct crypto_alg *alg; - int err; - - inst = skcipher_alloc_instance_simple(tmpl, tb); - if (IS_ERR(inst)) - return PTR_ERR(inst); - - alg = skcipher_ialg_simple(inst); - - /* CFB mode is a stream cipher. */ - inst->alg.base.cra_blocksize = 1; - - /* - * To simplify the implementation, configure the skcipher walk to only - * give a partial block at the very end, never earlier. 
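 *
 * Editor's note: the cfb template is removed outright rather than converted
 * like cbc above. For the record, the mode keyed its pad off the block
 * encryption function only:
 *
 *	C[1] = P[1] ^ E_K(IV)
 *	C[i] = P[i] ^ E_K(C[i-1])
 *
 * and decryption XORs the same pad back out, which is why none of the code
 * in this file ever called the block decryption path.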
- */ - inst->alg.chunksize = alg->cra_blocksize; - - inst->alg.encrypt = crypto_cfb_encrypt; - inst->alg.decrypt = crypto_cfb_decrypt; - - err = skcipher_register_instance(tmpl, inst); - if (err) - inst->free(inst); - - return err; -} - -static struct crypto_template crypto_cfb_tmpl = { - .name = "cfb", - .create = crypto_cfb_create, - .module = THIS_MODULE, -}; - -static int __init crypto_cfb_module_init(void) -{ - return crypto_register_template(&crypto_cfb_tmpl); -} - -static void __exit crypto_cfb_module_exit(void) -{ - crypto_unregister_template(&crypto_cfb_tmpl); -} - -subsys_initcall(crypto_cfb_module_init); -module_exit(crypto_cfb_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("CFB block cipher mode of operation"); -MODULE_ALIAS_CRYPTO("cfb"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/chacha_generic.c b/crypto/chacha.c index 8beea79ab117..ec16d5a33f3c 100644 --- a/crypto/chacha_generic.c +++ b/crypto/chacha.c @@ -1,27 +1,61 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539) + * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers * * Copyright (C) 2015 Martin Willi * Copyright (C) 2018 Google LLC */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/algapi.h> -#include <crypto/internal/chacha.h> +#include <crypto/chacha.h> #include <crypto/internal/skcipher.h> #include <linux/module.h> +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +static int chacha_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize, int nrounds) +{ + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + int i; + + if (keysize != CHACHA_KEY_SIZE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx->key); i++) + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); + + ctx->nrounds = nrounds; + return 0; +} + +static int chacha20_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 20); +} + +static int chacha12_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 12); +} + static int chacha_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) + const struct chacha_ctx *ctx, + const u8 iv[CHACHA_IV_SIZE]) { struct skcipher_walk walk; - u32 state[16]; + struct chacha_state state; int err; err = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, iv); + chacha_init(&state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -29,8 +63,8 @@ static int chacha_stream_xor(struct skcipher_request *req, if (nbytes < walk.total) nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE); - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); + chacha_crypt(&state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes, ctx->nrounds); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } @@ -40,7 +74,7 @@ static int chacha_stream_xor(struct skcipher_request *req, static int crypto_chacha_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); return chacha_stream_xor(req, ctx, req->iv); } @@ -48,14 +82,14 @@ static int crypto_chacha_crypt(struct skcipher_request *req) static int crypto_xchacha_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm 
= crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); struct chacha_ctx subctx; - u32 state[16]; + struct chacha_state state; u8 real_iv[16]; /* Compute the subkey given the original key and first 128 nonce bits */ - chacha_init_generic(state, ctx->key, req->iv); - hchacha_block_generic(state, subctx.key, ctx->nrounds); + chacha_init(&state, ctx->key, req->iv); + hchacha_block(&state, subctx.key, ctx->nrounds); subctx.nrounds = ctx->nrounds; /* Build the real IV */ @@ -69,8 +103,8 @@ static int crypto_xchacha_crypt(struct skcipher_request *req) static struct skcipher_alg algs[] = { { .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-generic", - .base.cra_priority = 100, + .base.cra_driver_name = "chacha20-lib", + .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, @@ -82,10 +116,11 @@ static struct skcipher_alg algs[] = { .setkey = chacha20_setkey, .encrypt = crypto_chacha_crypt, .decrypt = crypto_chacha_crypt, - }, { + }, + { .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-generic", - .base.cra_priority = 100, + .base.cra_driver_name = "xchacha20-lib", + .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, @@ -97,10 +132,11 @@ static struct skcipher_alg algs[] = { .setkey = chacha20_setkey, .encrypt = crypto_xchacha_crypt, .decrypt = crypto_xchacha_crypt, - }, { + }, + { .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-generic", - .base.cra_priority = 100, + .base.cra_driver_name = "xchacha12-lib", + .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, @@ -115,25 +151,25 @@ static struct skcipher_alg algs[] = { } }; -static int __init chacha_generic_mod_init(void) +static int __init crypto_chacha_mod_init(void) { return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); } -static void __exit chacha_generic_mod_fini(void) +static void __exit crypto_chacha_mod_fini(void) { crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); } -subsys_initcall(chacha_generic_mod_init); -module_exit(chacha_generic_mod_fini); +module_init(crypto_chacha_mod_init); +module_exit(crypto_chacha_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)"); +MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers"); MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-generic"); +MODULE_ALIAS_CRYPTO("chacha20-lib"); MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-generic"); +MODULE_ALIAS_CRYPTO("xchacha20-lib"); MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-generic"); +MODULE_ALIAS_CRYPTO("xchacha12-lib"); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 97bbb135e9a6..b4b5a7198d84 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -12,34 +12,21 @@ #include <crypto/chacha.h> #include <crypto/poly1305.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> +#include <linux/string.h> struct chachapoly_instance_ctx { struct crypto_skcipher_spawn chacha; - struct crypto_ahash_spawn poly; unsigned int saltlen; }; struct chachapoly_ctx { struct crypto_skcipher 
*chacha; - struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; - u8 salt[]; -}; - -struct poly_req { - /* zero byte padding for AD/ciphertext, as needed */ - u8 pad[POLY1305_BLOCK_SIZE]; - /* tail data with AD/ciphertext lengths */ - struct { - __le64 assoclen; - __le64 cryptlen; - } tail; - struct scatterlist src[1]; - struct ahash_request req; /* must be last member */ + u8 salt[] __counted_by(saltlen); }; struct chacha_req { @@ -62,7 +49,6 @@ struct chachapoly_req_ctx { /* request flags, with MAY_SLEEP cleared if needed */ u32 flags; union { - struct poly_req poly; struct chacha_req chacha; } u; }; @@ -105,19 +91,9 @@ static int poly_verify_tag(struct aead_request *req) return 0; } -static int poly_copy_tag(struct aead_request *req) +static void chacha_decrypt_done(void *data, int err) { - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - - scatterwalk_map_and_copy(rctx->tag, req->dst, - req->assoclen + rctx->cryptlen, - sizeof(rctx->tag), 1); - return 0; -} - -static void chacha_decrypt_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_verify_tag); + async_done_continue(data, err, poly_verify_tag); } static int chacha_decrypt(struct aead_request *req) @@ -151,210 +127,76 @@ skip: return poly_verify_tag(req); } -static int poly_tail_continue(struct aead_request *req) -{ - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - - if (rctx->cryptlen == req->cryptlen) /* encrypting */ - return poly_copy_tag(req); - - return chacha_decrypt(req); -} - -static void poly_tail_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_tail_continue); -} - -static int poly_tail(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; - - preq->tail.assoclen = cpu_to_le64(rctx->assoclen); - preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen); - sg_init_one(preq->src, &preq->tail, sizeof(preq->tail)); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_tail_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, - rctx->tag, sizeof(preq->tail)); - - err = crypto_ahash_finup(&preq->req); - if (err) - return err; - - return poly_tail_continue(req); -} - -static void poly_cipherpad_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_tail); -} - -static int poly_cipherpad(struct aead_request *req) +static int poly_hash(struct aead_request *req) { - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; + const void *zp = page_address(ZERO_PAGE(0)); + struct scatterlist *sg = req->src; + struct poly1305_desc_ctx desc; + struct scatter_walk walk; + struct { + union { + struct { + __le64 assoclen; + __le64 cryptlen; + }; + u8 u8[16]; + }; + } tail; unsigned int padlen; - int err; - - padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; - memset(preq->pad, 0, sizeof(preq->pad)); - sg_init_one(preq->src, preq->pad, padlen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_cipherpad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); + unsigned int total; - err = 
crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_tail(req); -} - -static void poly_cipher_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_cipherpad); -} - -static int poly_cipher(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - struct scatterlist *crypt = req->src; - int err; + if (sg != req->dst) + memcpy_sglist(req->dst, sg, req->assoclen); if (rctx->cryptlen == req->cryptlen) /* encrypting */ - crypt = req->dst; - - crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_cipher_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; + sg = req->dst; - return poly_cipherpad(req); -} + poly1305_init(&desc, rctx->key); + scatterwalk_start(&walk, sg); -static void poly_adpad_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_cipher); -} + total = rctx->assoclen; + while (total) { + unsigned int n = scatterwalk_next(&walk, total); -static int poly_adpad(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - unsigned int padlen; - int err; + poly1305_update(&desc, walk.addr, n); + scatterwalk_done_src(&walk, n); + total -= n; + } padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE; - memset(preq->pad, 0, sizeof(preq->pad)); - sg_init_one(preq->src, preq->pad, padlen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_adpad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_cipher(req); -} - -static void poly_ad_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_adpad); -} - -static int poly_ad(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_ad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_adpad(req); -} - -static void poly_setkey_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_ad); -} + poly1305_update(&desc, zp, padlen); -static int poly_setkey(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; + scatterwalk_skip(&walk, req->assoclen - rctx->assoclen); - sg_init_one(preq->src, rctx->key, sizeof(rctx->key)); + total = rctx->cryptlen; + while (total) { + unsigned int n = scatterwalk_next(&walk, total); - ahash_request_set_callback(&preq->req, rctx->flags, - poly_setkey_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, 
NULL, sizeof(rctx->key)); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_ad(req); -} - -static void poly_init_done(struct crypto_async_request *areq, int err) -{ - async_done_continue(areq->data, err, poly_setkey); -} + poly1305_update(&desc, walk.addr, n); + scatterwalk_done_src(&walk, n); + total -= n; + } -static int poly_init(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; + padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; + poly1305_update(&desc, zp, padlen); - ahash_request_set_callback(&preq->req, rctx->flags, - poly_init_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); + tail.assoclen = cpu_to_le64(rctx->assoclen); + tail.cryptlen = cpu_to_le64(rctx->cryptlen); + poly1305_update(&desc, tail.u8, sizeof(tail)); + memzero_explicit(&tail, sizeof(tail)); + poly1305_final(&desc, rctx->tag); - err = crypto_ahash_init(&preq->req); - if (err) - return err; + if (rctx->cryptlen != req->cryptlen) + return chacha_decrypt(req); - return poly_setkey(req); + memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag)); + return 0; } -static void poly_genkey_done(struct crypto_async_request *areq, int err) +static void poly_genkey_done(void *data, int err) { - async_done_continue(areq->data, err, poly_init); + async_done_continue(data, err, poly_hash); } static int poly_genkey(struct aead_request *req) @@ -388,12 +230,12 @@ static int poly_genkey(struct aead_request *req) if (err) return err; - return poly_init(req); + return poly_hash(req); } -static void chacha_encrypt_done(struct crypto_async_request *areq, int err) +static void chacha_encrypt_done(void *data, int err) { - async_done_continue(areq->data, err, poly_genkey); + async_done_continue(data, err, poly_genkey); } static int chacha_encrypt(struct aead_request *req) @@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req) /* encrypt call chain: * - chacha_encrypt/done() * - poly_genkey/done() - * - poly_init/done() - * - poly_setkey/done() - * - poly_ad/done() - * - poly_adpad/done() - * - poly_cipher/done() - * - poly_cipherpad/done() - * - poly_tail/done/continue() - * - poly_copy_tag() + * - poly_hash() */ return chacha_encrypt(req); } @@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req) /* decrypt call chain: * - poly_genkey/done() - * - poly_init/done() - * - poly_setkey/done() - * - poly_ad/done() - * - poly_adpad/done() - * - poly_cipher/done() - * - poly_cipherpad/done() - * - poly_tail/done/continue() + * - poly_hash() * - chacha_decrypt/done() * - poly_verify_tag() */ @@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm) struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_skcipher *chacha; - struct crypto_ahash *poly; unsigned long align; - poly = crypto_spawn_ahash(&ictx->poly); - if (IS_ERR(poly)) - return PTR_ERR(poly); - chacha = crypto_spawn_skcipher(&ictx->chacha); - if (IS_ERR(chacha)) { - crypto_free_ahash(poly); + if (IS_ERR(chacha)) return PTR_ERR(chacha); - } ctx->chacha = chacha; - ctx->poly = poly; ctx->saltlen = ictx->saltlen; align = crypto_aead_alignmask(tfm); @@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm) crypto_aead_set_reqsize( tfm, align + offsetof(struct chachapoly_req_ctx, u) + - max(offsetof(struct chacha_req, req) + - sizeof(struct 
skcipher_request) + - crypto_skcipher_reqsize(chacha), - offsetof(struct poly_req, req) + - sizeof(struct ahash_request) + - crypto_ahash_reqsize(poly))); + offsetof(struct chacha_req, req) + + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(chacha)); return 0; } @@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm) { struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - crypto_free_ahash(ctx->poly); crypto_free_skcipher(ctx->chacha); } @@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst) struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_skcipher(&ctx->chacha); - crypto_drop_ahash(&ctx->poly); kfree(inst); } @@ -558,8 +374,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; - struct skcipher_alg *chacha; - struct hash_alg_common *poly; + struct skcipher_alg_common *chacha; int err; if (ivsize > CHACHAPOLY_IV_SIZE) @@ -579,19 +394,14 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; - chacha = crypto_spawn_skcipher_alg(&ctx->chacha); - - err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[2]), 0, mask); - if (err) - goto err_free_inst; - poly = crypto_spawn_ahash_alg(&ctx->poly); + chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha); err = -EINVAL; - if (poly->digestsize != POLY1305_DIGEST_SIZE) + if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") && + strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic")) goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ - if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE) + if (chacha->ivsize != CHACHA_IV_SIZE) goto err_free_inst; /* Not a stream cipher? 
*/ if (chacha->base.cra_blocksize != 1) @@ -599,23 +409,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->base.cra_name, - poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,poly1305)", name, + chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->base.cra_driver_name, - poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,poly1305-generic)", name, + chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_priority = (chacha->base.cra_priority + - poly->base.cra_priority) / 2; + inst->alg.base.cra_priority = chacha->base.cra_priority; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = chacha->base.cra_alignmask | - poly->base.cra_alignmask; + inst->alg.base.cra_alignmask = chacha->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha); + inst->alg.chunksize = chacha->chunksize; inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; inst->alg.init = chachapoly_init; inst->alg.exit = chachapoly_exit; @@ -668,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void) ARRAY_SIZE(rfc7539_tmpls)); } -subsys_initcall(chacha20poly1305_module_init); +module_init(chacha20poly1305_module_init); module_exit(chacha20poly1305_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/cipher.c b/crypto/cipher.c index b47141ed4a9f..1fe62bf79656 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -54,7 +53,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm, return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen); } -EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL"); static inline void cipher_crypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src, bool enc) @@ -82,11 +81,39 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, { cipher_crypt_one(tfm, dst, src, true); } -EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, "CRYPTO_INTERNAL"); void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { cipher_crypt_one(tfm, dst, src, false); } -EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, "CRYPTO_INTERNAL"); + +struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher) +{ + struct crypto_tfm *tfm = crypto_cipher_tfm(cipher); + struct crypto_alg *alg = tfm->__crt_alg; + struct crypto_cipher *ncipher; + struct crypto_tfm *ntfm; + + if (alg->cra_init) + return ERR_PTR(-ENOSYS); + + if (unlikely(!crypto_mod_get(alg))) + return ERR_PTR(-ESTALE); + + ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC); + if (IS_ERR(ntfm)) { + crypto_mod_put(alg); + return ERR_CAST(ntfm); + } + + ntfm->crt_flags = tfm->crt_flags; + + ncipher = 
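A note on the kfree_sensitive() conversion in the cipher.c hunk above: an open-coded memset() directly before kfree() is a dead store that the compiler may legally drop. A userspace analogue of the safer pattern, with free_sensitive() and memset_v as illustrative names rather than kernel API:

#include <stdlib.h>
#include <string.h>

/* Route the zeroization through a volatile function pointer so the
 * compiler cannot prove the store is dead and elide it. */
static void *(*volatile memset_v)(void *, int, size_t) = memset;

static void free_sensitive(void *p, size_t len)
{
	if (!p)
		return;
	memset_v(p, 0, len);
	free(p);
}

int main(void)
{
	char *key = malloc(32);

	/* ... use key ... */
	free_sensitive(key, 32);
	return 0;
}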
__crypto_cipher_cast(ntfm); + + return ncipher; +} +EXPORT_SYMBOL_GPL(crypto_clone_cipher); diff --git a/crypto/cmac.c b/crypto/cmac.c index f4a5d3bfb376..1b03964abe00 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -13,9 +13,12 @@ #include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> +#include <crypto/utils.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> /* * +------------------------ @@ -28,33 +31,15 @@ */ struct cmac_tfm_ctx { struct crypto_cipher *child; - u8 ctx[]; -}; - -/* - * +------------------------ - * | <shash desc> - * +------------------------ - * | cmac_desc_ctx - * +------------------------ - * | odds (block size) - * +------------------------ - * | prev (block size) - * +------------------------ - */ -struct cmac_desc_ctx { - unsigned int len; - u8 ctx[]; + __be64 consts[]; }; static int crypto_cmac_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); unsigned int bs = crypto_shash_blocksize(parent); - __be64 *consts = PTR_ALIGN((void *)ctx->ctx, - (alignmask | (__alignof__(__be64) - 1)) + 1); + __be64 *consts = ctx->consts; u64 _const[2]; int i, err = 0; u8 msb_mask, gfmask; @@ -104,14 +89,10 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent, static int crypto_cmac_digest_init(struct shash_desc *pdesc) { - unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs; + u8 *prev = shash_desc_ctx(pdesc); - ctx->len = 0; memset(prev, 0, bs); - return 0; } @@ -119,104 +100,75 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); - u8 *prev = odds + bs; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(odds + ctx->len, p, len); - ctx->len += len; - return 0; - } + u8 *prev = shash_desc_ctx(pdesc); - /* filling odds with new data and encrypting it */ - memcpy(odds + ctx->len, p, bs - ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - crypto_xor(prev, odds, bs); - crypto_cipher_encrypt_one(tfm, prev, prev); - - /* clearing the length */ - ctx->len = 0; - - /* encrypting the rest of data */ - while (len > bs) { + do { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(odds, p, len); - ctx->len = len; - } - - return 0; + } while (len >= bs); + return len; } -static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 
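The msb_mask/gfmask arithmetic in the CMAC setkey above is the standard subkey derivation from RFC 4493: each constant is the previous one doubled in GF(2^128), a one-bit left shift with 0x87 folded into the low byte when the high bit falls off. A byte-wise reference of that doubling step (the kernel code operates on __be64 words instead):

#include <stdint.h>

/* Double a 16-byte big-endian block in GF(2^128): shift left by one
 * bit; if the MSB fell off, reduce by XORing 0x87 into the last byte. */
static void gf128_double(uint8_t b[16])
{
	uint8_t carry = b[0] >> 7;

	for (int i = 0; i < 15; i++)
		b[i] = (uint8_t)(b[i] << 1) | (b[i + 1] >> 7);
	b[15] = (uint8_t)(b[15] << 1) ^ (carry ? 0x87 : 0);
}

/* K1 = gf128_double(E_K(0^128)); K2 = gf128_double(K1). */
int main(void)
{
	uint8_t b[16] = { 0x80 };

	gf128_double(b);
	return b[15] == 0x87 ? 0 : 1;	/* reduction kicked in */
}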
*consts = PTR_ALIGN((void *)tctx->ctx, - (alignmask | (__alignof__(__be64) - 1)) + 1); - u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); - u8 *prev = odds + bs; + u8 *prev = shash_desc_ctx(pdesc); unsigned int offset = 0; - if (ctx->len != bs) { - unsigned int rlen; - u8 *p = odds + ctx->len; - - *p = 0x80; - p++; - - rlen = bs - ctx->len - 1; - if (rlen) - memset(p, 0, rlen); - + crypto_xor(prev, src, len); + if (len != bs) { + prev[len] ^= 0x80; offset += bs; } + crypto_xor(prev, (const u8 *)tctx->consts + offset, bs); + crypto_cipher_encrypt_one(tfm, out, prev); + return 0; +} - crypto_xor(prev, odds, bs); - crypto_xor(prev, consts + offset, bs); +static int cmac_init_tfm(struct crypto_shash *tfm) +{ + struct shash_instance *inst = shash_alg_instance(tfm); + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); + struct crypto_cipher_spawn *spawn; + struct crypto_cipher *cipher; - crypto_cipher_encrypt_one(tfm, out, prev); + spawn = shash_instance_ctx(inst); + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; return 0; } -static int cmac_init_tfm(struct crypto_tfm *tfm) +static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm) { + struct cmac_tfm_ctx *octx = crypto_shash_ctx(otfm); + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); struct crypto_cipher *cipher; - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); - struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); - cipher = crypto_spawn_cipher(spawn); + cipher = crypto_clone_cipher(octx->child); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; return 0; -}; +} -static void cmac_exit_tfm(struct crypto_tfm *tfm) +static void cmac_exit_tfm(struct crypto_shash *tfm) { - struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); crypto_free_cipher(ctx->child); } @@ -225,7 +177,6 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; - unsigned long alignmask; u32 mask; int err; @@ -257,30 +208,22 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alignmask = alg->cra_alignmask; - inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) + + alg->cra_blocksize * 2; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = - ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment()) - + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) - + alg->cra_blocksize * 2; - - inst->alg.base.cra_ctxsize = - ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment()) - + ((alignmask | (__alignof__(__be64) - 1)) & - ~(crypto_tfm_ctx_alignment() - 1)) - + alg->cra_blocksize * 2; - - inst->alg.base.cra_init = cmac_init_tfm; - inst->alg.base.cra_exit = cmac_exit_tfm; - + inst->alg.descsize = alg->cra_blocksize; inst->alg.init = crypto_cmac_digest_init; inst->alg.update = crypto_cmac_digest_update; - inst->alg.final = crypto_cmac_digest_final; + inst->alg.finup = crypto_cmac_digest_finup; inst->alg.setkey = crypto_cmac_digest_setkey; + inst->alg.init_tfm = cmac_init_tfm; + inst->alg.clone_tfm = cmac_clone_tfm; + inst->alg.exit_tfm = 
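The branch in the new finup above covers the two CMAC termination cases: a complete final block is masked with the first derived constant, an incomplete one gets 10* padding and the second. A self-contained sketch of just that selection, assuming the caller encrypts prev afterwards; all names are illustrative:

#include <stdint.h>

/* Fold the last 'len' (1..16) input bytes into the running block and
 * apply the proper subkey, as crypto_cmac_digest_finup() does. */
static void cmac_last_block(uint8_t prev[16], const uint8_t *src,
			    unsigned int len, const uint8_t k1[16],
			    const uint8_t k2[16])
{
	const uint8_t *subkey = len == 16 ? k1 : k2;
	unsigned int i;

	for (i = 0; i < len; i++)
		prev[i] ^= src[i];
	if (len != 16)
		prev[len] ^= 0x80;	/* 10* padding */
	for (i = 0; i < 16; i++)
		prev[i] ^= subkey[i];
}

int main(void)
{
	uint8_t prev[16] = { 0 }, k1[16] = { 0 }, k2[16] = { 0 };
	uint8_t msg[3] = { 1, 2, 3 };

	cmac_last_block(prev, msg, 3, k1, k2);
	return prev[3] == 0x80 ? 0 : 1;	/* padding landed after the data */
}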
cmac_exit_tfm; inst->free = shash_free_singlespawn_instance; @@ -308,10 +251,10 @@ static void __exit crypto_cmac_module_exit(void) crypto_unregister_template(&crypto_cmac_tmpl); } -subsys_initcall(crypto_cmac_module_init); +module_init(crypto_cmac_module_init); module_exit(crypto_cmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CMAC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("cmac"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/compress.c b/crypto/compress.c deleted file mode 100644 index 9048fe390c46..000000000000 --- a/crypto/compress.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * Compression operations. - * - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - */ -#include <linux/crypto.h> -#include "internal.h" - -int crypto_comp_compress(struct crypto_comp *comp, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_tfm *tfm = crypto_comp_tfm(comp); - - return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, - dlen); -} -EXPORT_SYMBOL_GPL(crypto_comp_compress); - -int crypto_comp_decompress(struct crypto_comp *comp, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_tfm *tfm = crypto_comp_tfm(comp); - - return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, - dlen); -} -EXPORT_SYMBOL_GPL(crypto_comp_decompress); diff --git a/crypto/compress.h b/crypto/compress.h new file mode 100644 index 000000000000..f7737a1fcbbd --- /dev/null +++ b/crypto/compress.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _LOCAL_CRYPTO_COMPRESS_H +#define _LOCAL_CRYPTO_COMPRESS_H + +#include "internal.h" + +struct acomp_req; +struct comp_alg_common; + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); + +void comp_prepare_alg(struct comp_alg_common *alg); + +#endif /* _LOCAL_CRYPTO_COMPRESS_H */ diff --git a/crypto/crc32_generic.c b/crypto/crc32.c index a989cb44fd16..489cbed9422e 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32.c @@ -7,7 +7,7 @@ * This is crypto api shash wrappers to crc32_le. 
*/ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/crc32.h> #include <crypto/internal/hash.h> #include <linux/init.h> @@ -64,8 +64,7 @@ static int crc32_update(struct shash_desc *desc, const u8 *data, } /* No final XOR 0xFFFFFFFF, like crc32_le */ -static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len, - u8 *out) +static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { put_unaligned_le32(crc32_le(*crcp, data, len), out); return 0; @@ -88,28 +87,27 @@ static int crc32_final(struct shash_desc *desc, u8 *out) static int crc32_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, - out); + return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, out); } + static struct shash_alg alg = { - .setkey = crc32_setkey, - .init = crc32_init, - .update = crc32_update, - .final = crc32_final, - .finup = crc32_finup, - .digest = crc32_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32", - .cra_driver_name = "crc32-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = crc32_cra_init, - } + .setkey = crc32_setkey, + .init = crc32_init, + .update = crc32_update, + .final = crc32_final, + .finup = crc32_finup, + .digest = crc32_digest, + .descsize = sizeof(u32), + .digestsize = CHKSUM_DIGEST_SIZE, + + .base.cra_name = "crc32", + .base.cra_driver_name = "crc32-lib", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, + .base.cra_blocksize = CHKSUM_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(u32), + .base.cra_module = THIS_MODULE, + .base.cra_init = crc32_cra_init, }; static int __init crc32_mod_init(void) @@ -122,11 +120,10 @@ static void __exit crc32_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(crc32_mod_init); +module_init(crc32_mod_init); module_exit(crc32_mod_fini); MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crc32"); -MODULE_ALIAS_CRYPTO("crc32-generic"); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c index 768614738541..1eff54dde2f7 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c.c @@ -30,7 +30,7 @@ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> @@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - ctx->crc = __crc32c_le(ctx->crc, data, length); + ctx->crc = crc32c(ctx->crc, data, length); return 0; } @@ -99,7 +99,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { - put_unaligned_le32(~__crc32c_le(*crcp, data, len), out); + put_unaligned_le32(~crc32c(*crcp, data, len), out); return 0; } @@ -128,24 +128,23 @@ static int crc32c_cra_init(struct crypto_tfm *tfm) } static struct shash_alg alg = { - .digestsize = CHKSUM_DIGEST_SIZE, - .setkey = chksum_setkey, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crc32c", 
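Context for the two checksum wrappers above: crc32 exposes the raw crc32_le() state with no final inversion (hence the "No final XOR 0xFFFFFFFF" comment), while crc32c inverts on output in its finup. A bit-at-a-time reference for the reflected update both are built on; the kernel's lib/crc32 uses tables or CPU instructions, this is only a model:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (polynomial 0xEDB88320), one bit per iteration.
 * CRC32C differs only in using 0x82F63B78 and inverting on output. */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t s[] = "123456789";
	uint32_t crc = crc32_le_ref(0xFFFFFFFFu, s, 9) ^ 0xFFFFFFFFu;

	printf("%08X\n", crc);	/* CBF43926, the standard check value */
	return 0;
}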
- .cra_driver_name = "crc32c-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct chksum_ctx), - .cra_module = THIS_MODULE, - .cra_init = crc32c_cra_init, - } + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + + .base.cra_name = "crc32c", + .base.cra_driver_name = "crc32c-lib", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, + .base.cra_blocksize = CHKSUM_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct chksum_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_init = crc32c_cra_init, }; static int __init crc32c_mod_init(void) @@ -158,11 +157,10 @@ static void __exit crc32c_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(crc32c_mod_init); +module_init(crc32c_mod_init); module_exit(crc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crc32c"); -MODULE_ALIAS_CRYPTO("crc32c-generic"); diff --git a/crypto/crct10dif_common.c b/crypto/crct10dif_common.c deleted file mode 100644 index b2fab366f518..000000000000 --- a/crypto/crct10dif_common.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Cryptographic API. - * - * T10 Data Integrity Field CRC16 Crypto Transform - * - * Copyright (c) 2007 Oracle Corporation. All rights reserved. - * Written by Martin K. Petersen <martin.petersen@oracle.com> - * Copyright (C) 2013 Intel Corporation - * Author: Tim Chen <tim.c.chen@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include <linux/crc-t10dif.h> -#include <linux/module.h> -#include <linux/kernel.h> - -/* Table generated using the following polynomium: - * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 - * gt: 0x8bb7 - */ -static const __u16 t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; - -__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len) -{ - unsigned int i; - - for (i = 0 ; i < len ; i++) - crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; - - return crc; -} -EXPORT_SYMBOL(crc_t10dif_generic); - -MODULE_DESCRIPTION("T10 DIF CRC calculation common code"); -MODULE_LICENSE("GPL"); diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c deleted file mode 100644 index e843982073bb..000000000000 --- a/crypto/crct10dif_generic.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Cryptographic API. - * - * T10 Data Integrity Field CRC16 Crypto Transform - * - * Copyright (c) 2007 Oracle Corporation. All rights reserved. - * Written by Martin K. Petersen <martin.petersen@oracle.com> - * Copyright (C) 2013 Intel Corporation - * Author: Tim Chen <tim.c.chen@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
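The deleted table above is fully determined by its comment: a non-reflected CRC-16 with generator polynomial 0x8bb7. For the record, a small program that regenerates any entry and reproduces the removed byte-wise update loop; crc_t10dif_ref() is an illustrative name:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One table entry: run the index byte through 8 steps of the
 * non-reflected CRC-16 with polynomial 0x8bb7. */
static uint16_t t10dif_entry(uint8_t idx)
{
	uint16_t crc = (uint16_t)idx << 8;

	for (int k = 0; k < 8; k++)
		crc = (crc & 0x8000) ? (uint16_t)(crc << 1) ^ 0x8bb7
				     : (uint16_t)(crc << 1);
	return crc;
}

/* Same update rule the deleted crc_t10dif_generic() used. */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++)
		crc = (uint16_t)(crc << 8) ^
		      t10dif_entry(((crc >> 8) ^ p[i]) & 0xff);
	return crc;
}

int main(void)
{
	printf("0x%04X\n", t10dif_entry(1));	/* prints 0x8BB7 */
	return crc_t10dif_ref(0, (const uint8_t *)"", 0) == 0 ? 0 : 1;
}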
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include <linux/module.h> -#include <linux/crc-t10dif.h> -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/kernel.h> - -struct chksum_desc_ctx { - __u16 crc; -}; - -/* - * Steps through buffer one byte at a time, calculates reflected - * crc using table. - */ - -static int chksum_init(struct shash_desc *desc) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = 0; - - return 0; -} - -static int chksum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = crc_t10dif_generic(ctx->crc, data, length); - return 0; -} - -static int chksum_final(struct shash_desc *desc, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - *(__u16 *)out = ctx->crc; - return 0; -} - -static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) -{ - *(__u16 *)out = crc_t10dif_generic(crc, data, len); - return 0; -} - -static int chksum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(ctx->crc, data, len, out); -} - -static int chksum_digest(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - return __chksum_finup(0, data, length, out); -} - -static struct shash_alg alg = { - .digestsize = CRC_T10DIF_DIGEST_SIZE, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crct10dif", - .cra_driver_name = "crct10dif-generic", - .cra_priority = 100, - .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init crct10dif_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit crct10dif_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(crct10dif_mod_init); -module_exit(crct10dif_mod_fini); - -MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); -MODULE_DESCRIPTION("T10 DIF CRC calculation."); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crct10dif"); -MODULE_ALIAS_CRYPTO("crct10dif-generic"); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index a1bea0f4baa8..cd38f4676176 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -34,11 +34,16 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); static struct workqueue_struct *cryptd_wq; struct cryptd_cpu_queue { + local_lock_t bh_lock; struct crypto_queue queue; struct work_struct work; }; struct cryptd_queue { + /* + * Protected by disabling BH to allow enqueueing from softinterrupt and + * dequeuing from kworker (cryptd_queue_worker()). 
+ */ struct cryptd_cpu_queue __percpu *cpu_queue; }; @@ -64,11 +69,11 @@ struct aead_instance_ctx { struct cryptd_skcipher_ctx { refcount_t refcnt; - struct crypto_sync_skcipher *child; + struct crypto_skcipher *child; }; struct cryptd_skcipher_request_ctx { - crypto_completion_t complete; + struct skcipher_request req; }; struct cryptd_hash_ctx { @@ -78,6 +83,7 @@ struct cryptd_hash_ctx { struct cryptd_hash_request_ctx { crypto_completion_t complete; + void *data; struct shash_desc desc; }; @@ -87,7 +93,7 @@ struct cryptd_aead_ctx { }; struct cryptd_aead_request_ctx { - crypto_completion_t complete; + struct aead_request req; }; static void cryptd_queue_worker(struct work_struct *work); @@ -105,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue, cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, cryptd_queue_worker); + local_lock_init(&cpu_queue->bh_lock); } pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); return 0; @@ -125,28 +132,30 @@ static void cryptd_fini_queue(struct cryptd_queue *queue) static int cryptd_enqueue_request(struct cryptd_queue *queue, struct crypto_async_request *request) { - int cpu, err; + int err; struct cryptd_cpu_queue *cpu_queue; refcount_t *refcnt; - cpu = get_cpu(); + local_bh_disable(); + local_lock_nested_bh(&queue->cpu_queue->bh_lock); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); if (err == -ENOSPC) - goto out_put_cpu; + goto out; - queue_work_on(cpu, cryptd_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work); if (!refcount_read(refcnt)) - goto out_put_cpu; + goto out; refcount_inc(refcnt); -out_put_cpu: - put_cpu(); +out: + local_unlock_nested_bh(&queue->cpu_queue->bh_lock); + local_bh_enable(); return err; } @@ -162,23 +171,20 @@ static void cryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct cryptd_cpu_queue, work); /* * Only handle one request at a time to avoid hogging crypto workqueue. - * preempt_disable/enable is used to prevent being preempted by - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent - * cryptd_enqueue_request() being accessed from software interrupts. 
*/ local_bh_disable(); - preempt_disable(); + __local_lock_nested_bh(&cpu_queue->bh_lock); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); + __local_unlock_nested_bh(&cpu_queue->bh_lock); local_bh_enable(); if (!req) return; if (backlog) - backlog->complete(backlog, -EINPROGRESS); - req->complete(req, 0); + crypto_request_complete(backlog, -EINPROGRESS); + crypto_request_complete(req, 0); if (cpu_queue->queue.qlen) queue_work(cryptd_wq, &cpu_queue->work); @@ -228,84 +234,85 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_sync_skcipher *child = ctx->child; + struct crypto_skcipher *child = ctx->child; - crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(child, - crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(child, key, keylen); + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, + crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + return crypto_skcipher_setkey(child, key, keylen); } -static void cryptd_skcipher_complete(struct skcipher_request *req, int err) +static struct skcipher_request *cryptd_skcipher_prepare( + struct skcipher_request *req, int err) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); - int refcnt = refcount_read(&ctx->refcnt); - - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); + struct skcipher_request *subreq = &rctx->req; + struct cryptd_skcipher_ctx *ctx; + struct crypto_skcipher *child; - if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) - crypto_free_skcipher(tfm); -} - -static void cryptd_skcipher_encrypt(struct crypto_async_request *base, - int err) -{ - struct skcipher_request *req = skcipher_request_cast(base); - struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + req->base.complete = subreq->base.complete; + req->base.data = subreq->base.data; if (unlikely(err == -EINPROGRESS)) - goto out; + return NULL; - skcipher_request_set_sync_tfm(subreq, child); + ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + child = ctx->child; + + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - - req->base.complete = rctx->complete; - -out: - cryptd_skcipher_complete(req, err); + return subreq; } -static void cryptd_skcipher_decrypt(struct crypto_async_request *base, - int err) +static void cryptd_skcipher_complete(struct skcipher_request *req, int err, + crypto_completion_t complete) { - struct skcipher_request *req = skcipher_request_cast(base); struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = 
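The prepare/complete pair above shows the recurring cryptd pattern: the caller's completion is parked in the embedded subrequest while cryptd's trampoline sits in req->base, and on -EINPROGRESS the two are swapped back so a later completion still reaches the caller. A stripped-down userspace model of that callback juggling, with all names illustrative:

#include <stdio.h>

typedef void (*completion_t)(void *data, int err);

struct request {
	completion_t complete;	/* currently installed callback */
	void *data;
	completion_t saved;	/* caller's original callback */
	void *saved_data;
};

static void trampoline(void *data, int err);

/* Queueing side: park the caller's callback, install the trampoline. */
static void enqueue(struct request *req)
{
	req->saved = req->complete;
	req->saved_data = req->data;
	req->complete = trampoline;
	req->data = req;
}

/* Worker side: restore the caller's callback and invoke it. */
static void trampoline(void *data, int err)
{
	struct request *req = data;

	req->complete = req->saved;
	req->data = req->saved_data;
	req->complete(req->data, err);
}

static void caller_done(void *data, int err)
{
	printf("%s: %d\n", (const char *)data, err);
}

int main(void)
{
	struct request r = { .complete = caller_done, .data = "demo" };

	enqueue(&r);
	r.complete(r.data, 0);	/* backend fires the trampoline */
	return 0;
}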
ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = &rctx->req; + int refcnt = refcount_read(&ctx->refcnt); - if (unlikely(err == -EINPROGRESS)) - goto out; + local_bh_disable(); + skcipher_request_complete(req, err); + local_bh_enable(); - skcipher_request_set_sync_tfm(subreq, child); - skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, - req->iv); + if (unlikely(err == -EINPROGRESS)) { + subreq->base.complete = req->base.complete; + subreq->base.data = req->base.data; + req->base.complete = complete; + req->base.data = req; + } else if (refcnt && refcount_dec_and_test(&ctx->refcnt)) + crypto_free_skcipher(tfm); +} - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); +static void cryptd_skcipher_encrypt(void *data, int err) +{ + struct skcipher_request *req = data; + struct skcipher_request *subreq; - req->base.complete = rctx->complete; + subreq = cryptd_skcipher_prepare(req, err); + if (likely(subreq)) + err = crypto_skcipher_encrypt(subreq); -out: - cryptd_skcipher_complete(req, err); + cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt); +} + +static void cryptd_skcipher_decrypt(void *data, int err) +{ + struct skcipher_request *req = data; + struct skcipher_request *subreq; + + subreq = cryptd_skcipher_prepare(req, err); + if (likely(subreq)) + err = crypto_skcipher_decrypt(subreq); + + cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt); } static int cryptd_skcipher_enqueue(struct skcipher_request *req, @@ -313,11 +320,14 @@ static int cryptd_skcipher_enqueue(struct skcipher_request *req, { struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_request *subreq = &rctx->req; struct cryptd_queue *queue; queue = cryptd_get_queue(crypto_skcipher_tfm(tfm)); - rctx->complete = req->base.complete; + subreq->base.complete = req->base.complete; + subreq->base.data = req->base.data; req->base.complete = compl; + req->base.data = req; return cryptd_enqueue_request(queue, &req->base); } @@ -344,9 +354,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctx->child = (struct crypto_sync_skcipher *)cipher; + ctx->child = cipher; crypto_skcipher_set_reqsize( - tfm, sizeof(struct cryptd_skcipher_request_ctx)); + tfm, sizeof(struct cryptd_skcipher_request_ctx) + + crypto_skcipher_reqsize(cipher)); return 0; } @@ -354,7 +365,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->child); + crypto_free_skcipher(ctx->child); } static void cryptd_skcipher_free(struct skcipher_instance *inst) @@ -372,7 +383,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, { struct skcipherd_instance_ctx *ctx; struct skcipher_instance *inst; - struct skcipher_alg *alg; + struct skcipher_alg_common *alg; u32 type; u32 mask; int err; @@ -391,17 +402,17 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(&ctx->spawn); + alg = crypto_spawn_skcipher_alg_common(&ctx->spawn); err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); if (err) goto err_free_inst; inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - inst->alg.ivsize = 
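The reqsize change above is what lets the child request live inside cryptd's own request context rather than on the stack: the context is sized as a fixed header plus the child's reqsize. Roughly the usual trailing-storage idiom, sketched in userspace with illustrative names (real code must also respect alignment, which this demo glosses over):

#include <stdlib.h>

struct child_req { int state; };

/* Fixed header followed by space for a variably sized child request;
 * the child is reached by pointer arithmetic, no second allocation. */
struct outer_ctx {
	int flags;
	unsigned char child[];
};

static struct outer_ctx *outer_alloc(size_t child_reqsize)
{
	return calloc(1, sizeof(struct outer_ctx) + child_reqsize);
}

static struct child_req *outer_child(struct outer_ctx *ctx)
{
	return (struct child_req *)ctx->child;
}

int main(void)
{
	struct outer_ctx *ctx = outer_alloc(sizeof(struct child_req));

	if (!ctx)
		return 1;
	outer_child(ctx)->state = 1;
	free(ctx);
	return 0;
}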
crypto_skcipher_alg_ivsize(alg); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + inst->alg.ivsize = alg->ivsize; + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize; + inst->alg.max_keysize = alg->max_keysize; inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); @@ -422,12 +433,12 @@ err_free_inst: return err; } -static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) +static int cryptd_hash_init_tfm(struct crypto_ahash *tfm) { - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); + struct ahash_instance *inst = ahash_alg_instance(tfm); + struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst); struct crypto_shash_spawn *spawn = &ictx->spawn; - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_shash *hash; hash = crypto_spawn_shash(spawn); @@ -435,15 +446,30 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) return PTR_ERR(hash); ctx->child = hash; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + crypto_ahash_set_reqsize(tfm, sizeof(struct cryptd_hash_request_ctx) + crypto_shash_descsize(hash)); return 0; } -static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) +static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm, + struct crypto_ahash *tfm) +{ + struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct crypto_shash *hash; + + hash = crypto_clone_shash(ctx->child); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + nctx->child = hash; + return 0; +} + +static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); crypto_free_shash(ctx->child); } @@ -469,45 +495,63 @@ static int cryptd_hash_enqueue(struct ahash_request *req, cryptd_get_queue(crypto_ahash_tfm(tfm)); rctx->complete = req->base.complete; + rctx->data = req->base.data; req->base.complete = compl; + req->base.data = req; return cryptd_enqueue_request(queue, &req->base); } -static void cryptd_hash_complete(struct ahash_request *req, int err) +static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req, + int err) +{ + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + + req->base.complete = rctx->complete; + req->base.data = rctx->data; + + if (unlikely(err == -EINPROGRESS)) + return NULL; + + return &rctx->desc; +} + +static void cryptd_hash_complete(struct ahash_request *req, int err, + crypto_completion_t complete) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); int refcnt = refcount_read(&ctx->refcnt); local_bh_disable(); - rctx->complete(&req->base, err); + ahash_request_complete(req, err); local_bh_enable(); - if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) + if (err == -EINPROGRESS) { + req->base.complete = complete; + req->base.data = req; + } else if (refcnt && refcount_dec_and_test(&ctx->refcnt)) crypto_free_ahash(tfm); } -static void cryptd_hash_init(struct crypto_async_request *req_async, int err) +static void cryptd_hash_init(void *data, int err) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct ahash_request 
*req = data; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_shash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct shash_desc *desc = &rctx->desc; + struct shash_desc *desc; - if (unlikely(err == -EINPROGRESS)) + desc = cryptd_hash_prepare(req, err); + if (unlikely(!desc)) goto out; desc->tfm = child; err = crypto_shash_init(desc); - req->base.complete = rctx->complete; - out: - cryptd_hash_complete(req, err); + cryptd_hash_complete(req, err, cryptd_hash_init); } static int cryptd_hash_init_enqueue(struct ahash_request *req) @@ -515,22 +559,16 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_init); } -static void cryptd_hash_update(struct crypto_async_request *req_async, int err) +static void cryptd_hash_update(void *data, int err) { - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx; + struct ahash_request *req = data; + struct shash_desc *desc; - rctx = ahash_request_ctx(req); + desc = cryptd_hash_prepare(req, err); + if (likely(desc)) + err = shash_ahash_update(req, desc); - if (unlikely(err == -EINPROGRESS)) - goto out; - - err = shash_ahash_update(req, &rctx->desc); - - req->base.complete = rctx->complete; - -out: - cryptd_hash_complete(req, err); + cryptd_hash_complete(req, err, cryptd_hash_update); } static int cryptd_hash_update_enqueue(struct ahash_request *req) @@ -538,20 +576,16 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_update); } -static void cryptd_hash_final(struct crypto_async_request *req_async, int err) +static void cryptd_hash_final(void *data, int err) { - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct ahash_request *req = data; + struct shash_desc *desc; - if (unlikely(err == -EINPROGRESS)) - goto out; - - err = crypto_shash_final(&rctx->desc, req->result); - - req->base.complete = rctx->complete; + desc = cryptd_hash_prepare(req, err); + if (likely(desc)) + err = crypto_shash_final(desc, req->result); -out: - cryptd_hash_complete(req, err); + cryptd_hash_complete(req, err, cryptd_hash_final); } static int cryptd_hash_final_enqueue(struct ahash_request *req) @@ -559,20 +593,16 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_final); } -static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) +static void cryptd_hash_finup(void *data, int err) { - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct ahash_request *req = data; + struct shash_desc *desc; - if (unlikely(err == -EINPROGRESS)) - goto out; - - err = shash_ahash_finup(req, &rctx->desc); - - req->base.complete = rctx->complete; + desc = cryptd_hash_prepare(req, err); + if (likely(desc)) + err = shash_ahash_finup(req, desc); -out: - cryptd_hash_complete(req, err); + cryptd_hash_complete(req, err, cryptd_hash_finup); } static int cryptd_hash_finup_enqueue(struct ahash_request *req) @@ -580,25 +610,24 @@ static int cryptd_hash_finup_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_finup); } -static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) +static void 
cryptd_hash_digest(void *data, int err) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct ahash_request *req = data; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_shash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct shash_desc *desc = &rctx->desc; + struct shash_desc *desc; - if (unlikely(err == -EINPROGRESS)) + desc = cryptd_hash_prepare(req, err); + if (unlikely(!desc)) goto out; desc->tfm = child; err = shash_ahash_digest(req, desc); - req->base.complete = rctx->complete; - out: - cryptd_hash_complete(req, err); + cryptd_hash_complete(req, err, cryptd_hash_digest); } static int cryptd_hash_digest_enqueue(struct ahash_request *req) @@ -669,8 +698,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); - inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; - inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; + inst->alg.init_tfm = cryptd_hash_init_tfm; + inst->alg.clone_tfm = cryptd_hash_clone_tfm; + inst->alg.exit_tfm = cryptd_hash_exit_tfm; inst->alg.init = cryptd_hash_init_enqueue; inst->alg.update = cryptd_hash_update_enqueue; @@ -711,56 +741,74 @@ static int cryptd_aead_setauthsize(struct crypto_aead *parent, } static void cryptd_aead_crypt(struct aead_request *req, - struct crypto_aead *child, - int err, - int (*crypt)(struct aead_request *req)) + struct crypto_aead *child, int err, + int (*crypt)(struct aead_request *req), + crypto_completion_t compl) { struct cryptd_aead_request_ctx *rctx; + struct aead_request *subreq; struct cryptd_aead_ctx *ctx; - crypto_completion_t compl; struct crypto_aead *tfm; int refcnt; rctx = aead_request_ctx(req); - compl = rctx->complete; + subreq = &rctx->req; + req->base.complete = subreq->base.complete; + req->base.data = subreq->base.data; tfm = crypto_aead_reqtfm(req); if (unlikely(err == -EINPROGRESS)) goto out; - aead_request_set_tfm(req, child); - err = crypt( req ); + + aead_request_set_tfm(subreq, child); + aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + aead_request_set_ad(subreq, req->assoclen); + + err = crypt(subreq); out: ctx = crypto_aead_ctx(tfm); refcnt = refcount_read(&ctx->refcnt); local_bh_disable(); - compl(&req->base, err); + aead_request_complete(req, err); local_bh_enable(); - if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) + if (err == -EINPROGRESS) { + subreq->base.complete = req->base.complete; + subreq->base.data = req->base.data; + req->base.complete = compl; + req->base.data = req; + } else if (refcnt && refcount_dec_and_test(&ctx->refcnt)) crypto_free_aead(tfm); } -static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) +static void cryptd_aead_encrypt(void *data, int err) { - struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); - struct crypto_aead *child = ctx->child; - struct aead_request *req; + struct aead_request *req = data; + struct cryptd_aead_ctx *ctx; + struct crypto_aead *child; - req = container_of(areq, struct aead_request, base); - cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt); + ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + child = ctx->child; + cryptd_aead_crypt(req, child, 
err, crypto_aead_alg(child)->encrypt, + cryptd_aead_encrypt); } -static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) +static void cryptd_aead_decrypt(void *data, int err) { - struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); - struct crypto_aead *child = ctx->child; - struct aead_request *req; + struct aead_request *req = data; + struct cryptd_aead_ctx *ctx; + struct crypto_aead *child; - req = container_of(areq, struct aead_request, base); - cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt); + ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + child = ctx->child; + cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt, + cryptd_aead_decrypt); } static int cryptd_aead_enqueue(struct aead_request *req, @@ -769,9 +817,12 @@ static int cryptd_aead_enqueue(struct aead_request *req, struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); + struct aead_request *subreq = &rctx->req; - rctx->complete = req->base.complete; + subreq->base.complete = req->base.complete; + subreq->base.data = req->base.data; req->base.complete = compl; + req->base.data = req; return cryptd_enqueue_request(queue, &req->base); } @@ -799,8 +850,8 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm) ctx->child = cipher; crypto_aead_set_reqsize( - tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx), - crypto_aead_reqsize(cipher))); + tfm, sizeof(struct cryptd_aead_request_ctx) + + crypto_aead_reqsize(cipher)); return 0; } @@ -884,7 +935,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: return cryptd_create_skcipher(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_HASH: return cryptd_create_hash(tmpl, tb, algt, &queue); @@ -932,7 +983,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - return &ctx->child->base; + return ctx->child; } EXPORT_SYMBOL_GPL(cryptd_skcipher_child); @@ -1064,7 +1115,8 @@ static int __init cryptd_init(void) { int err; - cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, + cryptd_wq = alloc_workqueue("cryptd", + WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE | WQ_PERCPU, 1); if (!cryptd_wq) return -ENOMEM; @@ -1093,7 +1145,7 @@ static void __exit cryptd_exit(void) crypto_unregister_template(&cryptd_tmpl); } -subsys_initcall(cryptd_init); +module_init(cryptd_init); module_exit(cryptd_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index cff21f4e03e3..18e1689efe12 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -7,15 +7,27 @@ * Author: Baolin Wang <baolin.wang@linaro.org> */ +#include <crypto/internal/aead.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/engine.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/kpp.h> +#include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/device.h> -#include <crypto/engine.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <uapi/linux/sched/types.h> #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 +struct crypto_engine_alg { + struct crypto_alg base; + struct crypto_engine_op op; +}; + /** * crypto_finalize_request - finalize one 
request if the request is done * @engine: the hardware engine @@ -26,9 +38,6 @@ static void crypto_finalize_request(struct crypto_engine *engine, struct crypto_async_request *req, int err) { unsigned long flags; - bool finalize_req = false; - int ret; - struct crypto_engine_ctx *enginectx; /* * If hardware cannot enqueue more requests @@ -38,22 +47,13 @@ static void crypto_finalize_request(struct crypto_engine *engine, if (!engine->retry_support) { spin_lock_irqsave(&engine->queue_lock, flags); if (engine->cur_req == req) { - finalize_req = true; engine->cur_req = NULL; } spin_unlock_irqrestore(&engine->queue_lock, flags); } - if (finalize_req || engine->retry_support) { - enginectx = crypto_tfm_ctx(req->tfm); - if (enginectx->op.prepare_request && - enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, req); - if (ret) - dev_err(engine->dev, "failed to unprepare request\n"); - } - } - req->complete(req, err); + lockdep_assert_in_softirq(); + crypto_request_complete(req, err); kthread_queue_work(engine->kworker, &engine->pump_requests); } @@ -71,10 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine, bool in_kthread) { struct crypto_async_request *async_req, *backlog; + struct crypto_engine_alg *alg; + struct crypto_engine_op *op; unsigned long flags; - bool was_busy = false; int ret; - struct crypto_engine_ctx *enginectx; spin_lock_irqsave(&engine->queue_lock, flags); @@ -82,12 +82,6 @@ static void crypto_pump_requests(struct crypto_engine *engine, if (!engine->retry_support && engine->cur_req) goto out; - /* If another context is idling then defer */ - if (engine->idling) { - kthread_queue_work(engine->kworker, &engine->pump_requests); - goto out; - } - /* Check if the engine queue is idle */ if (!crypto_queue_len(&engine->queue) || !engine->running) { if (!engine->busy) @@ -101,15 +95,6 @@ static void crypto_pump_requests(struct crypto_engine *engine, } engine->busy = false; - engine->idling = true; - spin_unlock_irqrestore(&engine->queue_lock, flags); - - if (engine->unprepare_crypt_hardware && - engine->unprepare_crypt_hardware(engine)) - dev_err(engine->dev, "failed to unprepare crypt hardware\n"); - - spin_lock_irqsave(&engine->queue_lock, flags); - engine->idling = false; goto out; } @@ -128,42 +113,15 @@ start_request: if (!engine->retry_support) engine->cur_req = async_req; - if (backlog) - backlog->complete(backlog, -EINPROGRESS); - - if (engine->busy) - was_busy = true; - else + if (!engine->busy) engine->busy = true; spin_unlock_irqrestore(&engine->queue_lock, flags); - /* Until here we get the request need to be encrypted successfully */ - if (!was_busy && engine->prepare_crypt_hardware) { - ret = engine->prepare_crypt_hardware(engine); - if (ret) { - dev_err(engine->dev, "failed to prepare crypt hardware\n"); - goto req_err_2; - } - } - - enginectx = crypto_tfm_ctx(async_req->tfm); - - if (enginectx->op.prepare_request) { - ret = enginectx->op.prepare_request(engine, async_req); - if (ret) { - dev_err(engine->dev, "failed to prepare request: %d\n", - ret); - goto req_err_2; - } - } - if (!enginectx->op.do_one_request) { - dev_err(engine->dev, "failed to do request\n"); - ret = -EINVAL; - goto req_err_1; - } - - ret = enginectx->op.do_one_request(engine, async_req); + alg = container_of(async_req->tfm->__crt_alg, + struct crypto_engine_alg, base); + op = &alg->op; + ret = op->do_one_request(engine, async_req); /* Request unsuccessfully executed by hardware */ if (ret < 0) { @@ -179,18 +137,6 @@ start_request: ret); goto 
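The container_of() lookup above is how the engine now reaches its ops without a per-tfm context: the wrapper struct is recovered from the embedded crypto_alg pointer. The mechanics in miniature, with demo types rather than the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_alg { const char *name; };

struct engine_alg {
	struct base_alg base;
	int (*do_one_request)(void);
};

static int do_req(void) { return 42; }

int main(void)
{
	struct engine_alg alg = {
		.base = { .name = "demo" },
		.do_one_request = do_req,
	};
	struct base_alg *b = &alg.base;	/* all the core passes around */
	struct engine_alg *e = container_of(b, struct engine_alg, base);

	printf("%s -> %d\n", e->base.name, e->do_one_request());
	return 0;
}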
req_err_1; } - /* - * If retry mechanism is supported, - * unprepare current request and - * enqueue it back into crypto-engine queue. - */ - if (enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, - async_req); - if (ret) - dev_err(engine->dev, - "failed to unprepare request\n"); - } spin_lock_irqsave(&engine->queue_lock, flags); /* * If hardware was unable to execute request, enqueue it @@ -206,16 +152,12 @@ start_request: goto retry; req_err_1: - if (enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, async_req); - if (ret) - dev_err(engine->dev, "failed to unprepare request\n"); - } - -req_err_2: - async_req->complete(async_req, ret); + crypto_request_complete(async_req, ret); retry: + if (backlog) + crypto_request_complete(backlog, -EINPROGRESS); + /* If retry mechanism is supported, send new requests to engine */ if (engine->retry_support) { spin_lock_irqsave(&engine->queue_lock, flags); @@ -226,17 +168,6 @@ retry: out: spin_unlock_irqrestore(&engine->queue_lock, flags); - /* - * Batch requests is possible only if - * hardware can enqueue multiple requests - */ - if (engine->do_batch_requests) { - ret = engine->do_batch_requests(engine); - if (ret) - dev_err(engine->dev, "failed to do batch requests: %d\n", - ret); - } - return; } @@ -252,6 +183,7 @@ static void crypto_pump_work(struct kthread_work *work) * crypto_transfer_request - transfer the new request into the engine queue * @engine: the hardware engine * @req: the request need to be listed into the engine queue + * @need_pump: indicates whether queue the pump of request to kthread_work */ static int crypto_transfer_request(struct crypto_engine *engine, struct crypto_async_request *req, @@ -328,6 +260,19 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine); /** + * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list + * into the engine queue + * @engine: the hardware engine + * @req: the request need to be listed into the engine queue + */ +int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine, + struct kpp_request *req) +{ + return crypto_transfer_request_to_engine(engine, &req->base); +} +EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine); + +/** * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request * to list into the engine queue * @engine: the hardware engine @@ -383,6 +328,19 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); /** + * crypto_finalize_kpp_request - finalize one kpp_request if the request is done + * @engine: the hardware engine + * @req: the request need to be finalized + * @err: error number + */ +void crypto_finalize_kpp_request(struct crypto_engine *engine, + struct kpp_request *req, int err) +{ + return crypto_finalize_request(engine, &req->base, err); +} +EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request); + +/** * crypto_finalize_skcipher_request - finalize one skcipher_request if * the request is done * @engine: the hardware engine @@ -466,12 +424,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop); * crypto-engine queue. * @dev: the device attached with one hardware engine * @retry_support: whether hardware has support for retry mechanism - * @cbk_do_batch: pointer to a callback function to be invoked when executing - * a batch of requests. 
- * This has the form: - * callback(struct crypto_engine *engine) - * where: - * @engine: the crypto engine structure. * @rt: whether this queue is set to run as a realtime task * @qlen: maximum size of the crypto-engine queue * @@ -480,7 +432,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop); */ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, - int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen) { struct crypto_engine *engine; @@ -496,14 +447,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, engine->rt = rt; engine->running = false; engine->busy = false; - engine->idling = false; engine->retry_support = retry_support; engine->priv_data = dev; - /* - * Batch requests is possible only if - * hardware has support for retry mechanism. - */ - engine->do_batch_requests = retry_support ? cbk_do_batch : NULL; snprintf(engine->name, sizeof(engine->name), "%s-engine", dev_name(dev)); @@ -511,7 +456,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, crypto_init_queue(&engine->queue, qlen); spin_lock_init(&engine->queue_lock); - engine->kworker = kthread_create_worker(0, "%s", engine->name); + engine->kworker = kthread_run_worker(0, "%s", engine->name); if (IS_ERR(engine->kworker)) { dev_err(dev, "failed to create crypto request pump task\n"); return NULL; @@ -538,7 +483,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set); */ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) { - return crypto_engine_alloc_init_and_set(dev, false, NULL, rt, + return crypto_engine_alloc_init_and_set(dev, false, rt, CRYPTO_ENGINE_MAX_QLEN); } EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); @@ -546,22 +491,175 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); /** * crypto_engine_exit - free the resources of hardware engine when exit * @engine: the hardware engine need to be freed - * - * Return 0 for success. 
*/ -int crypto_engine_exit(struct crypto_engine *engine) +void crypto_engine_exit(struct crypto_engine *engine) { int ret; ret = crypto_engine_stop(engine); if (ret) - return ret; + return; kthread_destroy_worker(engine->kworker); +} +EXPORT_SYMBOL_GPL(crypto_engine_exit); + +int crypto_engine_register_aead(struct aead_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + return crypto_register_aead(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_aead); + +void crypto_engine_unregister_aead(struct aead_engine_alg *alg) +{ + crypto_unregister_aead(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead); + +int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_engine_register_aead(&algs[i]); + if (ret) + goto err; + } return 0; + +err: + crypto_engine_unregister_aeads(algs, i); + + return ret; } -EXPORT_SYMBOL_GPL(crypto_engine_exit); +EXPORT_SYMBOL_GPL(crypto_engine_register_aeads); + +void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_engine_unregister_aead(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads); + +int crypto_engine_register_ahash(struct ahash_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + return crypto_register_ahash(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_ahash); + +void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg) +{ + crypto_unregister_ahash(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash); + +int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_engine_register_ahash(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + crypto_engine_unregister_ahashes(algs, i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes); + +void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs, + int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_engine_unregister_ahash(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes); + +int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + return crypto_register_akcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher); + +void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg) +{ + crypto_unregister_akcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher); + +int crypto_engine_register_kpp(struct kpp_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + return crypto_register_kpp(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_kpp); + +void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg) +{ + crypto_unregister_kpp(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp); + +int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + return crypto_register_skcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher); + +void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg) +{ + return crypto_unregister_skcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher); + +int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs, + int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = 
crypto_engine_register_skcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + crypto_engine_unregister_skciphers(algs, i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers); + +void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs, + int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_engine_unregister_skcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Crypto hardware engine framework"); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 5b84b0f7cc17..34588f39fdfc 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -15,25 +15,11 @@ #include <crypto/null.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/mm.h> #include <linux/string.h> -static DEFINE_MUTEX(crypto_default_null_skcipher_lock); -static struct crypto_sync_skcipher *crypto_default_null_skcipher; -static int crypto_default_null_skcipher_refcnt; - -static int null_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - if (slen > *dlen) - return -EINVAL; - memcpy(dst, src, slen); - *dlen = slen; - return 0; -} - static int null_init(struct shash_desc *desc) { return 0; @@ -75,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) static int null_skcipher_crypt(struct skcipher_request *req) { - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - if (walk.src.virt.addr != walk.dst.virt.addr) - memcpy(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - return err; + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, req->cryptlen); + return 0; } static struct shash_alg digest_null = { @@ -121,7 +97,7 @@ static struct skcipher_alg skcipher_null = { .decrypt = null_skcipher_crypt, }; -static struct crypto_alg null_algs[] = { { +static struct crypto_alg cipher_null = { .cra_name = "cipher_null", .cra_driver_name = "cipher_null-generic", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, @@ -134,62 +110,16 @@ static struct crypto_alg null_algs[] = { { .cia_setkey = null_setkey, .cia_encrypt = null_crypt, .cia_decrypt = null_crypt } } -}, { - .cra_name = "compress_null", - .cra_driver_name = "compress_null-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_blocksize = NULL_BLOCK_SIZE, - .cra_ctxsize = 0, - .cra_module = THIS_MODULE, - .cra_u = { .compress = { - .coa_compress = null_compress, - .coa_decompress = null_compress } } -} }; +}; -MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); -struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void) -{ - struct crypto_sync_skcipher *tfm; - - mutex_lock(&crypto_default_null_skcipher_lock); - tfm = crypto_default_null_skcipher; - - if (!tfm) { - tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); - if (IS_ERR(tfm)) - goto unlock; - - crypto_default_null_skcipher = tfm; - } - - crypto_default_null_skcipher_refcnt++; - -unlock: - mutex_unlock(&crypto_default_null_skcipher_lock); - - return tfm; -} -EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); - -void crypto_put_default_null_skcipher(void) -{ - mutex_lock(&crypto_default_null_skcipher_lock); - if (!--crypto_default_null_skcipher_refcnt) { - 
crypto_free_sync_skcipher(crypto_default_null_skcipher); - crypto_default_null_skcipher = NULL; - } - mutex_unlock(&crypto_default_null_skcipher_lock); -} -EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); - static int __init crypto_null_mod_init(void) { int ret = 0; - ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs)); + ret = crypto_register_alg(&cipher_null); if (ret < 0) goto out; @@ -206,19 +136,19 @@ static int __init crypto_null_mod_init(void) out_unregister_shash: crypto_unregister_shash(&digest_null); out_unregister_algs: - crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); + crypto_unregister_alg(&cipher_null); out: return ret; } static void __exit crypto_null_mod_fini(void) { - crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); + crypto_unregister_alg(&cipher_null); crypto_unregister_shash(&digest_null); crypto_unregister_skcipher(&skcipher_null); } -subsys_initcall(crypto_null_mod_init); +module_init(crypto_null_mod_init); module_exit(crypto_null_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user.c index 3fa20f12989f..aad429bef03e 100644 --- a/crypto/crypto_user_base.c +++ b/crypto/crypto_user.c @@ -18,7 +18,6 @@ #include <crypto/internal/rng.h> #include <crypto/akcipher.h> #include <crypto/kpp.h> -#include <crypto/internal/cryptouser.h> #include "internal.h" @@ -33,7 +32,7 @@ struct crypto_dump_info { u16 nlmsg_flags; }; -struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) +static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) { struct crypto_alg *q, *alg = NULL; @@ -85,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) sizeof(rcipher), &rcipher); } -static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_report_comp rcomp; - - memset(&rcomp, 0, sizeof(rcomp)); - - strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - - return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp); -} - static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { @@ -137,11 +125,6 @@ static int crypto_report_one(struct crypto_alg *alg, goto nla_put_failure; break; - case CRYPTO_ALG_TYPE_COMPRESS: - if (crypto_report_comp(skb, alg)) - goto nla_put_failure; - - break; } out: @@ -387,6 +370,13 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh, return crypto_del_default_rng(); } +static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, + struct nlattr **attrs) +{ + /* No longer supported */ + return -ENOTSUPP; +} + #define MSGSIZE(type) sizeof(struct type) static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c deleted file mode 100644 index 154884bf9275..000000000000 --- a/crypto/crypto_user_stat.c +++ /dev/null @@ -1,335 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Crypto user configuration API. 
- * - * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com> - * - */ - -#include <linux/crypto.h> -#include <linux/cryptouser.h> -#include <linux/sched.h> -#include <net/netlink.h> -#include <net/sock.h> -#include <crypto/internal/skcipher.h> -#include <crypto/internal/rng.h> -#include <crypto/akcipher.h> -#include <crypto/kpp.h> -#include <crypto/internal/cryptouser.h> - -#include "internal.h" - -#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) - -struct crypto_dump_info { - struct sk_buff *in_skb; - struct sk_buff *out_skb; - u32 nlmsg_seq; - u16 nlmsg_flags; -}; - -static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_aead raead; - - memset(&raead, 0, sizeof(raead)); - - strscpy(raead.type, "aead", sizeof(raead.type)); - - raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt); - raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen); - raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt); - raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen); - raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); -} - -static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_cipher rcipher; - - memset(&rcipher, 0, sizeof(rcipher)); - - strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); - - rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt); - rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen); - rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt); - rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen); - rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); -} - -static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_compress rcomp; - - memset(&rcomp, 0, sizeof(rcomp)); - - strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt); - rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); - rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); - rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); - rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); -} - -static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_compress racomp; - - memset(&racomp, 0, sizeof(racomp)); - - strscpy(racomp.type, "acomp", sizeof(racomp.type)); - racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt); - racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); - racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); - racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); - racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); -} - -static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_akcipher rakcipher; - - memset(&rakcipher, 0, sizeof(rakcipher)); - - strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); - rakcipher.stat_encrypt_cnt = 
atomic64_read(&alg->stats.akcipher.encrypt_cnt); - rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen); - rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt); - rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen); - rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt); - rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt); - rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, - sizeof(rakcipher), &rakcipher); -} - -static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_kpp rkpp; - - memset(&rkpp, 0, sizeof(rkpp)); - - strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); - - rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt); - rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt); - rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt); - rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); -} - -static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_hash rhash; - - memset(&rhash, 0, sizeof(rhash)); - - strscpy(rhash.type, "ahash", sizeof(rhash.type)); - - rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); - rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); - rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); -} - -static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_hash rhash; - - memset(&rhash, 0, sizeof(rhash)); - - strscpy(rhash.type, "shash", sizeof(rhash.type)); - - rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); - rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); - rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); -} - -static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_rng rrng; - - memset(&rrng, 0, sizeof(rrng)); - - strscpy(rrng.type, "rng", sizeof(rrng.type)); - - rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt); - rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen); - rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt); - rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); -} - -static int crypto_reportstat_one(struct crypto_alg *alg, - struct crypto_user_alg *ualg, - struct sk_buff *skb) -{ - memset(ualg, 0, sizeof(*ualg)); - - strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); - strscpy(ualg->cru_driver_name, alg->cra_driver_name, - sizeof(ualg->cru_driver_name)); - strscpy(ualg->cru_module_name, module_name(alg->cra_module), - sizeof(ualg->cru_module_name)); - - ualg->cru_type = 0; - ualg->cru_mask = 0; - ualg->cru_flags = alg->cra_flags; - ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); - - if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) - goto nla_put_failure; - if (alg->cra_flags & CRYPTO_ALG_LARVAL) { - struct crypto_stat_larval rl; - - memset(&rl, 0, sizeof(rl)); - strscpy(rl.type, "larval", sizeof(rl.type)); - if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl)) 
- goto nla_put_failure; - goto out; - } - - switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { - case CRYPTO_ALG_TYPE_AEAD: - if (crypto_report_aead(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_SKCIPHER: - if (crypto_report_cipher(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_CIPHER: - if (crypto_report_cipher(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_COMPRESS: - if (crypto_report_comp(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_ACOMPRESS: - if (crypto_report_acomp(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_SCOMPRESS: - if (crypto_report_acomp(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_AKCIPHER: - if (crypto_report_akcipher(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_KPP: - if (crypto_report_kpp(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_AHASH: - if (crypto_report_ahash(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_HASH: - if (crypto_report_shash(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_RNG: - if (crypto_report_rng(skb, alg)) - goto nla_put_failure; - break; - default: - pr_err("ERROR: Unhandled alg %d in %s\n", - alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL), - __func__); - } - -out: - return 0; - -nla_put_failure: - return -EMSGSIZE; -} - -static int crypto_reportstat_alg(struct crypto_alg *alg, - struct crypto_dump_info *info) -{ - struct sk_buff *in_skb = info->in_skb; - struct sk_buff *skb = info->out_skb; - struct nlmsghdr *nlh; - struct crypto_user_alg *ualg; - int err = 0; - - nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, - CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags); - if (!nlh) { - err = -EMSGSIZE; - goto out; - } - - ualg = nlmsg_data(nlh); - - err = crypto_reportstat_one(alg, ualg, skb); - if (err) { - nlmsg_cancel(skb, nlh); - goto out; - } - - nlmsg_end(skb, nlh); - -out: - return err; -} - -int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, - struct nlattr **attrs) -{ - struct net *net = sock_net(in_skb->sk); - struct crypto_user_alg *p = nlmsg_data(in_nlh); - struct crypto_alg *alg; - struct sk_buff *skb; - struct crypto_dump_info info; - int err; - - if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) - return -EINVAL; - - alg = crypto_alg_match(p, 0); - if (!alg) - return -ENOENT; - - err = -ENOMEM; - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); - if (!skb) - goto drop_alg; - - info.in_skb = in_skb; - info.out_skb = skb; - info.nlmsg_seq = in_nlh->nlmsg_seq; - info.nlmsg_flags = 0; - - err = crypto_reportstat_alg(alg, &info); - -drop_alg: - crypto_mod_put(alg); - - if (err) { - kfree_skb(skb); - return err; - } - - return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); -} - -MODULE_LICENSE("GPL"); diff --git a/crypto/ctr.c b/crypto/ctr.c index 23c698b22013..a388f0ceb3a0 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk, u8 *ctrblk = walk->iv; u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk, crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = 
crypto_cipher_blocksize(tfm); u8 *ctrblk = walk->iv; - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk, unsigned int bsize = crypto_cipher_blocksize(tfm); unsigned long alignmask = crypto_cipher_alignmask(tfm); unsigned int nbytes = walk->nbytes; + u8 *dst = walk->dst.virt.addr; u8 *ctrblk = walk->iv; - u8 *src = walk->src.virt.addr; u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - crypto_xor(src, keystream, bsize); + crypto_xor(dst, keystream, bsize); /* increment counter in counterblock */ crypto_inc(ctrblk, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; @@ -258,8 +258,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; u32 mask; int err; @@ -278,11 +278,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); /* We only support 16-byte blocks. */ err = -EINVAL; - if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE) + if (alg->ivsize != CTR_RFC3686_BLOCK_SIZE) goto err_free_inst; /* Not a stream cipher? */ @@ -303,11 +303,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = CTR_RFC3686_IV_SIZE; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + - CTR_RFC3686_NONCE_SIZE; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + - CTR_RFC3686_NONCE_SIZE; + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize + CTR_RFC3686_NONCE_SIZE; + inst->alg.max_keysize = alg->max_keysize + CTR_RFC3686_NONCE_SIZE; inst->alg.setkey = crypto_rfc3686_setkey; inst->alg.encrypt = crypto_rfc3686_crypt; @@ -352,11 +350,11 @@ static void __exit crypto_ctr_module_exit(void) ARRAY_SIZE(crypto_ctr_tmpls)); } -subsys_initcall(crypto_ctr_module_init); +module_init(crypto_ctr_module_init); module_exit(crypto_ctr_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CTR block cipher mode of operation"); MODULE_ALIAS_CRYPTO("rfc3686"); MODULE_ALIAS_CRYPTO("ctr"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/cts.c b/crypto/cts.c index 3766d47ebcc0..48898d5e24ff 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -85,9 +85,9 @@ static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key, return crypto_skcipher_setkey(child, key, keylen); } -static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err) +static void cts_cbc_crypt_done(void *data, int err) { - struct skcipher_request *req = areq->data; + struct skcipher_request *req = data; if (err == -EINPROGRESS) return; @@ -125,9 +125,9 @@ static int cts_cbc_encrypt(struct skcipher_request *req) return crypto_skcipher_encrypt(subreq); } -static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err) +static void crypto_cts_encrypt_done(void *data, int err) { - struct skcipher_request *req = areq->data; + struct skcipher_request *req = 
data; if (err) goto out; @@ -219,9 +219,9 @@ static int cts_cbc_decrypt(struct skcipher_request *req) return crypto_skcipher_decrypt(subreq); } -static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err) +static void crypto_cts_decrypt_done(void *data, int err) { - struct skcipher_request *req = areq->data; + struct skcipher_request *req = data; if (err) goto out; @@ -324,8 +324,8 @@ static void crypto_cts_free(struct skcipher_instance *inst) static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; struct skcipher_instance *inst; - struct skcipher_alg *alg; u32 mask; int err; @@ -344,10 +344,10 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; - if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize) + if (alg->ivsize != alg->base.cra_blocksize) goto err_free_inst; if (strncmp(alg->base.cra_name, "cbc(", 4)) @@ -363,9 +363,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = alg->base.cra_blocksize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize; + inst->alg.max_keysize = alg->max_keysize; inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx); @@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void) crypto_unregister_template(&crypto_cts_tmpl); } -subsys_initcall(crypto_cts_module_init); +module_init(crypto_cts_module_init); module_exit(crypto_cts_module_exit); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c deleted file mode 100644 index bd88fd571393..000000000000 --- a/crypto/curve25519-generic.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -#include <crypto/curve25519.h> -#include <crypto/internal/kpp.h> -#include <crypto/kpp.h> -#include <linux/module.h> -#include <linux/scatterlist.h> - -static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, - unsigned int len) -{ - u8 *secret = kpp_tfm_ctx(tfm); - - if (!len) - curve25519_generate_secret(secret); - else if (len == CURVE25519_KEY_SIZE && - crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) - memcpy(secret, buf, CURVE25519_KEY_SIZE); - else - return -EINVAL; - return 0; -} - -static int curve25519_compute_value(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - const u8 *secret = kpp_tfm_ctx(tfm); - u8 public_key[CURVE25519_KEY_SIZE]; - u8 buf[CURVE25519_KEY_SIZE]; - int copied, nbytes; - u8 const *bp; - - if (req->src) { - copied = sg_copy_to_buffer(req->src, - sg_nents_for_len(req->src, - CURVE25519_KEY_SIZE), - public_key, CURVE25519_KEY_SIZE); - if (copied != CURVE25519_KEY_SIZE) - return -EINVAL; - bp = public_key; - } else { - bp = curve25519_base_point; - } - - curve25519_generic(buf, secret, bp); - - /* might want less than we've got */ - nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); - copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, - nbytes), - buf, nbytes); - if (copied != nbytes) - return -EINVAL; - return 0; -} - 
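The compute callback just shown is the canonical kpp request flow: gather the peer's public key from the source scatterlist (falling back to the base point when req->src is NULL), run the scalar multiplication, then scatter at most max_size() bytes back to the destination. A minimal sketch of driving that interface synchronously from kernel code follows; it assumes only the standard <crypto/kpp.h> and crypto_wait_req() helpers, the function name is illustrative, and the key buffers are assumed to be heap-allocated (scatterlists cannot map stack memory).

#include <crypto/curve25519.h>
#include <crypto/kpp.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int curve25519_shared_secret_sketch(const u8 *secret,
					    const u8 *peer_public, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist src, dst;
	struct kpp_request *req;
	struct crypto_kpp *tfm;
	int err;

	tfm = crypto_alloc_kpp("curve25519", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* This driver's set_secret() takes the raw 32-byte private key. */
	err = crypto_kpp_set_secret(tfm, secret, CURVE25519_KEY_SIZE);
	if (err)
		goto out_free_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&src, peer_public, CURVE25519_KEY_SIZE);
	sg_init_one(&dst, out, CURVE25519_KEY_SIZE);
	kpp_request_set_input(req, &src, CURVE25519_KEY_SIZE);
	kpp_request_set_output(req, &dst, CURVE25519_KEY_SIZE);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				 CRYPTO_TFM_REQ_MAY_SLEEP,
				 crypto_req_done, &wait);

	/* Synchronous wrapper around the (potentially async) operation. */
	err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);

	kpp_request_free(req);
out_free_tfm:
	crypto_free_kpp(tfm);
	return err;
}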
-static unsigned int curve25519_max_size(struct crypto_kpp *tfm) -{ - return CURVE25519_KEY_SIZE; -} - -static struct kpp_alg curve25519_alg = { - .base.cra_name = "curve25519", - .base.cra_driver_name = "curve25519-generic", - .base.cra_priority = 100, - .base.cra_module = THIS_MODULE, - .base.cra_ctxsize = CURVE25519_KEY_SIZE, - - .set_secret = curve25519_set_secret, - .generate_public_key = curve25519_compute_value, - .compute_shared_secret = curve25519_compute_value, - .max_size = curve25519_max_size, -}; - -static int curve25519_init(void) -{ - return crypto_register_kpp(&curve25519_alg); -} - -static void curve25519_exit(void) -{ - crypto_unregister_kpp(&curve25519_alg); -} - -subsys_initcall(curve25519_init); -module_exit(curve25519_exit); - -MODULE_ALIAS_CRYPTO("curve25519"); -MODULE_ALIAS_CRYPTO("curve25519-generic"); -MODULE_LICENSE("GPL"); diff --git a/crypto/deflate.c b/crypto/deflate.c index b2a46f6dc961..a3e1fff55661 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -6,335 +6,256 @@ * by IPCOMP (RFC 3173 & RFC 2394). * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> - * - * FIXME: deflate transforms will require up to a total of about 436k of kernel - * memory on i386 (390k for compression, the rest for decompression), as the - * current zlib kernel code uses a worst case pre-allocation system by default. - * This needs to be fixed so that the amount of memory required is properly - * related to the winbits and memlevel parameters. - * - * The default winbits of 11 should suit most packets, and it may be something - * to configure on a per-tfm basis in the future. - * - * Currently, compression history is not maintained between tfm calls, as - * it is not needed for IPCOMP and keeps the code simpler. It can be - * implemented if someone wants it. + * Copyright (c) 2023 Google, LLC. 
<ardb@kernel.org> + * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au> */ +#include <crypto/internal/acompress.h> +#include <crypto/scatterwalk.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/crypto.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/percpu.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/zlib.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <linux/mm.h> -#include <linux/net.h> -#include <crypto/internal/scompress.h> #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION #define DEFLATE_DEF_WINBITS 11 #define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL -struct deflate_ctx { - struct z_stream_s comp_stream; - struct z_stream_s decomp_stream; +struct deflate_stream { + struct z_stream_s stream; + u8 workspace[]; }; -static int deflate_comp_init(struct deflate_ctx *ctx, int format) +static DEFINE_MUTEX(deflate_stream_lock); + +static void *deflate_alloc_stream(void) { - int ret = 0; - struct z_stream_s *stream = &ctx->comp_stream; + size_t size = max(zlib_inflate_workspacesize(), + zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, + DEFLATE_DEF_MEMLEVEL)); + struct deflate_stream *ctx; - stream->workspace = vzalloc(zlib_deflate_workspacesize( - MAX_WBITS, MAX_MEM_LEVEL)); - if (!stream->workspace) { - ret = -ENOMEM; - goto out; - } - if (format) - ret = zlib_deflateInit(stream, 3); - else - ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, - -DEFLATE_DEF_WINBITS, - DEFLATE_DEF_MEMLEVEL, - Z_DEFAULT_STRATEGY); - if (ret != Z_OK) { - ret = -EINVAL; - goto out_free; - } -out: - return ret; -out_free: - vfree(stream->workspace); - goto out; -} + ctx = kvmalloc(struct_size(ctx, workspace, size), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); -static int deflate_decomp_init(struct deflate_ctx *ctx, int format) -{ - int ret = 0; - struct z_stream_s *stream = &ctx->decomp_stream; + ctx->stream.workspace = ctx->workspace; - stream->workspace = vzalloc(zlib_inflate_workspacesize()); - if (!stream->workspace) { - ret = -ENOMEM; - goto out; - } - if (format) - ret = zlib_inflateInit(stream); - else - ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); - if (ret != Z_OK) { - ret = -EINVAL; - goto out_free; - } -out: - return ret; -out_free: - vfree(stream->workspace); - goto out; + return ctx; } -static void deflate_comp_exit(struct deflate_ctx *ctx) +static void deflate_free_stream(void *ctx) { - zlib_deflateEnd(&ctx->comp_stream); - vfree(ctx->comp_stream.workspace); + kvfree(ctx); } -static void deflate_decomp_exit(struct deflate_ctx *ctx) -{ - zlib_inflateEnd(&ctx->decomp_stream); - vfree(ctx->decomp_stream.workspace); -} +static struct crypto_acomp_streams deflate_streams = { + .alloc_ctx = deflate_alloc_stream, + .free_ctx = deflate_free_stream, +}; -static int __deflate_init(void *ctx, int format) +static int deflate_compress_one(struct acomp_req *req, + struct deflate_stream *ds) { + struct z_stream_s *stream = &ds->stream; + struct acomp_walk walk; int ret; - ret = deflate_comp_init(ctx, format); + ret = acomp_walk_virt(&walk, req, true); if (ret) - goto out; - ret = deflate_decomp_init(ctx, format); - if (ret) - deflate_comp_exit(ctx); -out: - return ret; -} - -static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) -{ - struct deflate_ctx *ctx; - int ret; + return ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return ERR_PTR(-ENOMEM); + do { + unsigned int dcur; - ret = 
__deflate_init(ctx, format); - if (ret) { - kfree(ctx); - return ERR_PTR(ret); - } + dcur = acomp_walk_next_dst(&walk); + if (!dcur) + return -ENOSPC; - return ctx; -} + stream->avail_out = dcur; + stream->next_out = walk.dst.virt.addr; -static void *deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - return gen_deflate_alloc_ctx(tfm, 0); -} + do { + int flush = Z_FINISH; + unsigned int scur; -static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - return gen_deflate_alloc_ctx(tfm, 1); -} + stream->avail_in = 0; + stream->next_in = NULL; -static int deflate_init(struct crypto_tfm *tfm) -{ - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + scur = acomp_walk_next_src(&walk); + if (scur) { + if (acomp_walk_more_src(&walk, scur)) + flush = Z_NO_FLUSH; + stream->avail_in = scur; + stream->next_in = walk.src.virt.addr; + } - return __deflate_init(ctx, 0); -} + ret = zlib_deflate(stream, flush); -static void __deflate_exit(void *ctx) -{ - deflate_comp_exit(ctx); - deflate_decomp_exit(ctx); -} + if (scur) { + scur -= stream->avail_in; + acomp_walk_done_src(&walk, scur); + } + } while (ret == Z_OK && stream->avail_out); -static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) -{ - __deflate_exit(ctx); - kfree_sensitive(ctx); -} + acomp_walk_done_dst(&walk, dcur); + } while (ret == Z_OK); -static void deflate_exit(struct crypto_tfm *tfm) -{ - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + if (ret != Z_STREAM_END) + return -EINVAL; - __deflate_exit(ctx); + req->dlen = stream->total_out; + return 0; } -static int __deflate_compress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) +static int deflate_compress(struct acomp_req *req) { - int ret = 0; - struct deflate_ctx *dctx = ctx; - struct z_stream_s *stream = &dctx->comp_stream; - - ret = zlib_deflateReset(stream); - if (ret != Z_OK) { - ret = -EINVAL; + struct crypto_acomp_stream *s; + struct deflate_stream *ds; + int err; + + s = crypto_acomp_lock_stream_bh(&deflate_streams); + ds = s->ctx; + + err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, + -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); + if (err != Z_OK) { + err = -EINVAL; goto out; } - stream->next_in = (u8 *)src; - stream->avail_in = slen; - stream->next_out = (u8 *)dst; - stream->avail_out = *dlen; + err = deflate_compress_one(req, ds); - ret = zlib_deflate(stream, Z_FINISH); - if (ret != Z_STREAM_END) { - ret = -EINVAL; - goto out; - } - ret = 0; - *dlen = stream->total_out; out: - return ret; + crypto_acomp_unlock_stream_bh(s); + + return err; } -static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int deflate_decompress_one(struct acomp_req *req, + struct deflate_stream *ds) { - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + struct z_stream_s *stream = &ds->stream; + bool out_of_space = false; + struct acomp_walk walk; + int ret; - return __deflate_compress(src, slen, dst, dlen, dctx); -} + ret = acomp_walk_virt(&walk, req, true); + if (ret) + return ret; -static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen, - void *ctx) -{ - return __deflate_compress(src, slen, dst, dlen, ctx); + do { + unsigned int scur; + + stream->avail_in = 0; + stream->next_in = NULL; + + scur = acomp_walk_next_src(&walk); + if (scur) { + stream->avail_in = scur; + stream->next_in = walk.src.virt.addr; + } + + do { + unsigned int dcur; + + dcur = acomp_walk_next_dst(&walk); + if (!dcur) { + 
out_of_space = true; + break; + } + + stream->avail_out = dcur; + stream->next_out = walk.dst.virt.addr; + + ret = zlib_inflate(stream, Z_NO_FLUSH); + + dcur -= stream->avail_out; + acomp_walk_done_dst(&walk, dcur); + } while (ret == Z_OK && stream->avail_in); + + if (scur) + acomp_walk_done_src(&walk, scur); + + if (out_of_space) + return -ENOSPC; + } while (ret == Z_OK); + + if (ret != Z_STREAM_END) + return -EINVAL; + + req->dlen = stream->total_out; + return 0; } -static int __deflate_decompress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) +static int deflate_decompress(struct acomp_req *req) { + struct crypto_acomp_stream *s; + struct deflate_stream *ds; + int err; - int ret = 0; - struct deflate_ctx *dctx = ctx; - struct z_stream_s *stream = &dctx->decomp_stream; + s = crypto_acomp_lock_stream_bh(&deflate_streams); + ds = s->ctx; - ret = zlib_inflateReset(stream); - if (ret != Z_OK) { - ret = -EINVAL; + err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS); + if (err != Z_OK) { + err = -EINVAL; goto out; } - stream->next_in = (u8 *)src; - stream->avail_in = slen; - stream->next_out = (u8 *)dst; - stream->avail_out = *dlen; - - ret = zlib_inflate(stream, Z_SYNC_FLUSH); - /* - * Work around a bug in zlib, which sometimes wants to taste an extra - * byte when being used in the (undocumented) raw deflate mode. - * (From USAGI). - */ - if (ret == Z_OK && !stream->avail_in && stream->avail_out) { - u8 zerostuff = 0; - stream->next_in = &zerostuff; - stream->avail_in = 1; - ret = zlib_inflate(stream, Z_FINISH); - } - if (ret != Z_STREAM_END) { - ret = -EINVAL; - goto out; - } - ret = 0; - *dlen = stream->total_out; + err = deflate_decompress_one(req, ds); + out: - return ret; + crypto_acomp_unlock_stream_bh(s); + + return err; } -static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int deflate_init(struct crypto_acomp *tfm) { - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + int ret; - return __deflate_decompress(src, slen, dst, dlen, dctx); -} + mutex_lock(&deflate_stream_lock); + ret = crypto_acomp_alloc_streams(&deflate_streams); + mutex_unlock(&deflate_stream_lock); -static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen, - void *ctx) -{ - return __deflate_decompress(src, slen, dst, dlen, ctx); + return ret; } -static struct crypto_alg alg = { - .cra_name = "deflate", - .cra_driver_name = "deflate-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct deflate_ctx), - .cra_module = THIS_MODULE, - .cra_init = deflate_init, - .cra_exit = deflate_exit, - .cra_u = { .compress = { - .coa_compress = deflate_compress, - .coa_decompress = deflate_decompress } } +static struct acomp_alg acomp = { + .compress = deflate_compress, + .decompress = deflate_decompress, + .init = deflate_init, + .base.cra_name = "deflate", + .base.cra_driver_name = "deflate-generic", + .base.cra_flags = CRYPTO_ALG_REQ_VIRT, + .base.cra_module = THIS_MODULE, }; -static struct scomp_alg scomp[] = { { - .alloc_ctx = deflate_alloc_ctx, - .free_ctx = deflate_free_ctx, - .compress = deflate_scompress, - .decompress = deflate_sdecompress, - .base = { - .cra_name = "deflate", - .cra_driver_name = "deflate-scomp", - .cra_module = THIS_MODULE, - } -}, { - .alloc_ctx = zlib_deflate_alloc_ctx, - .free_ctx = deflate_free_ctx, - .compress = deflate_scompress, - .decompress = deflate_sdecompress, - .base = { - .cra_name = 
"zlib-deflate", - .cra_driver_name = "zlib-deflate-scomp", - .cra_module = THIS_MODULE, - } -} }; - static int __init deflate_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp)); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_acomp(&acomp); } static void __exit deflate_mod_fini(void) { - crypto_unregister_alg(&alg); - crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp)); + crypto_unregister_acomp(&acomp); + crypto_acomp_free_streams(&deflate_streams); } -subsys_initcall(deflate_mod_init); +module_init(deflate_mod_init); module_exit(deflate_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); +MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); +MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>"); MODULE_ALIAS_CRYPTO("deflate"); +MODULE_ALIAS_CRYPTO("deflate-generic"); diff --git a/crypto/des_generic.c b/crypto/des_generic.c index c85354a5e94c..fce341400914 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -8,11 +8,11 @@ */ #include <asm/byteorder.h> +#include <crypto/algapi.h> #include <linux/bitops.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <crypto/internal/des.h> @@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void) crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs)); } -subsys_initcall(des_generic_mod_init); +module_init(des_generic_mod_init); module_exit(des_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/df_sp80090a.c b/crypto/df_sp80090a.c new file mode 100644 index 000000000000..dc63b31a93fc --- /dev/null +++ b/crypto/df_sp80090a.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * NIST SP800-90A DRBG derivation function + * + * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de> + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <crypto/aes.h> +#include <crypto/df_sp80090a.h> +#include <crypto/internal/drbg.h> + +static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx, + const unsigned char *key, + u8 keylen); +static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx, + const unsigned char *key, u8 keylen) +{ + aes_expandkey(aesctx, key, keylen); +} + +static void drbg_kcapi_sym(struct crypto_aes_ctx *aesctx, + unsigned char *outval, + const struct drbg_string *in, u8 blocklen_bytes) +{ + /* there is only component in *in */ + BUG_ON(in->len < blocklen_bytes); + aes_encrypt(aesctx, outval, in->buf); +} + +/* BCC function for CTR DRBG as defined in 10.4.3 */ + +static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx, + unsigned char *out, const unsigned char *key, + struct list_head *in, + u8 blocklen_bytes, + u8 keylen) +{ + struct drbg_string *curr = NULL; + struct drbg_string data; + short cnt = 0; + + drbg_string_fill(&data, out, blocklen_bytes); + + /* 10.4.3 step 2 / 4 */ + drbg_kcapi_symsetkey(aesctx, key, keylen); + list_for_each_entry(curr, in, list) { + const unsigned char *pos = curr->buf; + size_t len = curr->len; + /* 10.4.3 step 4.1 */ + while (len) { + /* 10.4.3 step 4.2 */ + if (blocklen_bytes == cnt) { + cnt = 0; + drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes); + } + out[cnt] ^= *pos; + pos++; + cnt++; + len--; + } + } + /* 10.4.3 step 4.2 for last block */ + if (cnt) + drbg_kcapi_sym(aesctx, 
out, &data, blocklen_bytes); +} + +/* + * scratchpad usage: drbg_ctr_update is interlinked with crypto_drbg_ctr_df + * (and drbg_ctr_bcc, but this function does not need any temporary buffers), + * the scratchpad is used as follows: + * drbg_ctr_update: + * temp + * start: drbg->scratchpad + * length: drbg_statelen(drbg) + drbg_blocklen(drbg) + * note: the cipher writing into this variable works + * blocklen-wise. Now, when the statelen is not a multiple + * of blocklen, the generateion loop below "spills over" + * by at most blocklen. Thus, we need to give sufficient + * memory. + * df_data + * start: drbg->scratchpad + + * drbg_statelen(drbg) + drbg_blocklen(drbg) + * length: drbg_statelen(drbg) + * + * crypto_drbg_ctr_df: + * pad + * start: df_data + drbg_statelen(drbg) + * length: drbg_blocklen(drbg) + * iv + * start: pad + drbg_blocklen(drbg) + * length: drbg_blocklen(drbg) + * temp + * start: iv + drbg_blocklen(drbg) + * length: drbg_satelen(drbg) + drbg_blocklen(drbg) + * note: temp is the buffer that the BCC function operates + * on. BCC operates blockwise. drbg_statelen(drbg) + * is sufficient when the DRBG state length is a multiple + * of the block size. For AES192 (and maybe other ciphers) + * this is not correct and the length for temp is + * insufficient (yes, that also means for such ciphers, + * the final output of all BCC rounds are truncated). + * Therefore, add drbg_blocklen(drbg) to cover all + * possibilities. + * refer to crypto_drbg_ctr_df_datalen() to get required length + */ + +/* Derivation Function for CTR DRBG as defined in 10.4.2 */ +int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx, + unsigned char *df_data, size_t bytes_to_return, + struct list_head *seedlist, + u8 blocklen_bytes, + u8 statelen) +{ + unsigned char L_N[8]; + /* S3 is input */ + struct drbg_string S1, S2, S4, cipherin; + LIST_HEAD(bcc_list); + unsigned char *pad = df_data + statelen; + unsigned char *iv = pad + blocklen_bytes; + unsigned char *temp = iv + blocklen_bytes; + size_t padlen = 0; + unsigned int templen = 0; + /* 10.4.2 step 7 */ + unsigned int i = 0; + /* 10.4.2 step 8 */ + const unsigned char *K = (unsigned char *) + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"; + unsigned char *X; + size_t generated_len = 0; + size_t inputlen = 0; + struct drbg_string *seed = NULL; + u8 keylen; + + memset(pad, 0, blocklen_bytes); + memset(iv, 0, blocklen_bytes); + keylen = statelen - blocklen_bytes; + /* 10.4.2 step 1 is implicit as we work byte-wise */ + + /* 10.4.2 step 2 */ + if ((512 / 8) < bytes_to_return) + return -EINVAL; + + /* 10.4.2 step 2 -- calculate the entire length of all input data */ + list_for_each_entry(seed, seedlist, list) + inputlen += seed->len; + drbg_cpu_to_be32(inputlen, &L_N[0]); + + /* 10.4.2 step 3 */ + drbg_cpu_to_be32(bytes_to_return, &L_N[4]); + + /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */ + padlen = (inputlen + sizeof(L_N) + 1) % (blocklen_bytes); + /* wrap the padlen appropriately */ + if (padlen) + padlen = blocklen_bytes - padlen; + /* + * pad / padlen contains the 0x80 byte and the following zero bytes. + * As the calculated padlen value only covers the number of zero + * bytes, this value has to be incremented by one for the 0x80 byte. 
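+ * As a worked example (illustrative values): with a 16-byte block
+ * size and 32 bytes of seed input, inputlen + sizeof(L_N) + 1 = 41,
+ * so padlen = 16 - (41 % 16) = 7 zero bytes, incremented to 8 to
+ * account for the 0x80 byte; L_N, the input and the pad then total
+ * 8 + 32 + 8 = 48 bytes, a whole number of blocks.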
+ */ + padlen++; + pad[0] = 0x80; + + /* 10.4.2 step 4 -- first fill the linked list and then order it */ + drbg_string_fill(&S1, iv, blocklen_bytes); + list_add_tail(&S1.list, &bcc_list); + drbg_string_fill(&S2, L_N, sizeof(L_N)); + list_add_tail(&S2.list, &bcc_list); + list_splice_tail(seedlist, &bcc_list); + drbg_string_fill(&S4, pad, padlen); + list_add_tail(&S4.list, &bcc_list); + + /* 10.4.2 step 9 */ + while (templen < (keylen + (blocklen_bytes))) { + /* + * 10.4.2 step 9.1 - the padding is implicit as the buffer + * holds zeros after allocation -- even the increment of i + * is irrelevant as the increment remains within length of i + */ + drbg_cpu_to_be32(i, iv); + /* 10.4.2 step 9.2 -- BCC and concatenation with temp */ + drbg_ctr_bcc(aesctx, temp + templen, K, &bcc_list, + blocklen_bytes, keylen); + /* 10.4.2 step 9.3 */ + i++; + templen += blocklen_bytes; + } + + /* 10.4.2 step 11 */ + X = temp + (keylen); + drbg_string_fill(&cipherin, X, blocklen_bytes); + + /* 10.4.2 step 12: overwriting of outval is implemented in next step */ + + /* 10.4.2 step 13 */ + drbg_kcapi_symsetkey(aesctx, temp, keylen); + while (generated_len < bytes_to_return) { + short blocklen = 0; + /* + * 10.4.2 step 13.1: the truncation of the key length is + * implicit as the key is only drbg_blocklen in size based on + * the implementation of the cipher function callback + */ + drbg_kcapi_sym(aesctx, X, &cipherin, blocklen_bytes); + blocklen = (blocklen_bytes < + (bytes_to_return - generated_len)) ? + blocklen_bytes : + (bytes_to_return - generated_len); + /* 10.4.2 step 13.2 and 14 */ + memcpy(df_data + generated_len, X, blocklen); + generated_len += blocklen; + } + + memset(iv, 0, blocklen_bytes); + memset(temp, 0, statelen + blocklen_bytes); + memset(pad, 0, blocklen_bytes); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_drbg_ctr_df); + +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); +MODULE_DESCRIPTION("Derivation Function conformant to SP800-90A"); diff --git a/crypto/dh.c b/crypto/dh.c index cd4f32092e5c..8250eeeebd0f 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -5,16 +5,16 @@ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> */ +#include <linux/fips.h> #include <linux/module.h> #include <crypto/internal/kpp.h> #include <crypto/kpp.h> #include <crypto/dh.h> -#include <linux/fips.h> +#include <crypto/rng.h> #include <linux/mpi.h> struct dh_ctx { MPI p; /* Value is guaranteed to be set. */ - MPI q; /* Value is optional. */ MPI g; /* Value is guaranteed to be set. */ MPI xa; /* Value is guaranteed to be set. */ }; @@ -22,7 +22,6 @@ struct dh_ctx { static void dh_clear_ctx(struct dh_ctx *ctx) { mpi_free(ctx->p); - mpi_free(ctx->q); mpi_free(ctx->g); mpi_free(ctx->xa); memset(ctx, 0, sizeof(*ctx)); @@ -47,6 +46,9 @@ static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm) static int dh_check_params_length(unsigned int p_len) { + if (fips_enabled) + return (p_len < 2048) ? -EINVAL : 0; + return (p_len < 1536) ? 
-EINVAL : 0; } @@ -59,12 +61,6 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params) if (!ctx->p) return -EINVAL; - if (params->q && params->q_size) { - ctx->q = mpi_read_raw_data(params->q, params->q_size); - if (!ctx->q) - return -EINVAL; - } - ctx->g = mpi_read_raw_data(params->g, params->g_size); if (!ctx->g) return -EINVAL; @@ -101,14 +97,21 @@ err_clear_ctx: /* * SP800-56A public key verification: * - * * If Q is provided as part of the domain paramenters, a full validation - * according to SP800-56A section 5.6.2.3.1 is performed. + * * For the safe-prime groups in FIPS mode, Q can be computed + * trivially from P and a full validation according to SP800-56A + * section 5.6.2.3.1 is performed. * - * * If Q is not provided, a partial validation according to SP800-56A section - * 5.6.2.3.2 is performed. + * * For all other sets of group parameters, only a partial validation + * according to SP800-56A section 5.6.2.3.2 is performed. */ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) { + MPI val, q; + int ret; + + if (!fips_enabled) + return 0; + if (unlikely(!ctx->p)) return -EINVAL; @@ -116,33 +119,47 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) * Step 1: Verify that 2 <= y <= p - 2. * * The upper limit check is actually y < p instead of y < p - 1 - * as the mpi_sub_ui function is yet missing. + * in order to save one mpi_sub_ui() invocation here. Note that + * p - 1 is the non-trivial element of the subgroup of order 2 and + * thus, the check on y^q below would fail if y == p - 1. */ if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0) return -EINVAL; - /* Step 2: Verify that 1 = y^q mod p */ - if (ctx->q) { - MPI val = mpi_alloc(0); - int ret; + /* + * Step 2: Verify that 1 = y^q mod p + * + * For the safe-prime groups q = (p - 1)/2. + */ + val = mpi_alloc(0); + if (!val) + return -ENOMEM; - if (!val) - return -ENOMEM; + q = mpi_alloc(mpi_get_nlimbs(ctx->p)); + if (!q) { + mpi_free(val); + return -ENOMEM; + } - ret = mpi_powm(val, y, ctx->q, ctx->p); + /* + * ->p is odd, so no need to explicitly subtract one + * from it before shifting to the right. 
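+ * (For instance, with the toy safe prime p = 23, p >> 1 = 11 =
+ * (23 - 1)/2, which is exactly q.)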
+ */ + ret = mpi_rshift(q, ctx->p, 1) ?: + mpi_powm(val, y, q, ctx->p); - if (ret) { - mpi_free(val); - return ret; - } + mpi_free(q); + if (ret) { + mpi_free(val); + return ret; + } - ret = mpi_cmp_ui(val, 1); + ret = mpi_cmp_ui(val, 1); - mpi_free(val); + mpi_free(val); - if (ret != 0) - return -EINVAL; - } + if (ret != 0) + return -EINVAL; return 0; } @@ -260,17 +277,650 @@ static struct kpp_alg dh = { }, }; -static int dh_init(void) + +struct dh_safe_prime { + unsigned int max_strength; + unsigned int p_size; + const char *p; +}; + +static const char safe_prime_g[] = { 2 }; + +struct dh_safe_prime_instance_ctx { + struct crypto_kpp_spawn dh_spawn; + const struct dh_safe_prime *safe_prime; +}; + +struct dh_safe_prime_tfm_ctx { + struct crypto_kpp *dh_tfm; +}; + +static void dh_safe_prime_free_instance(struct kpp_instance *inst) +{ + struct dh_safe_prime_instance_ctx *ctx = kpp_instance_ctx(inst); + + crypto_drop_kpp(&ctx->dh_spawn); + kfree(inst); +} + +static inline struct dh_safe_prime_instance_ctx *dh_safe_prime_instance_ctx( + struct crypto_kpp *tfm) +{ + return kpp_instance_ctx(kpp_alg_instance(tfm)); +} + +static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm) +{ + struct dh_safe_prime_instance_ctx *inst_ctx = + dh_safe_prime_instance_ctx(tfm); + struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); + + tfm_ctx->dh_tfm = crypto_spawn_kpp(&inst_ctx->dh_spawn); + if (IS_ERR(tfm_ctx->dh_tfm)) + return PTR_ERR(tfm_ctx->dh_tfm); + + kpp_set_reqsize(tfm, sizeof(struct kpp_request) + + crypto_kpp_reqsize(tfm_ctx->dh_tfm)); + + return 0; +} + +static void dh_safe_prime_exit_tfm(struct crypto_kpp *tfm) +{ + struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); + + crypto_free_kpp(tfm_ctx->dh_tfm); +} + +static u64 __add_u64_to_be(__be64 *dst, unsigned int n, u64 val) +{ + unsigned int i; + + for (i = n; val && i > 0; --i) { + u64 tmp = be64_to_cpu(dst[i - 1]); + + tmp += val; + val = tmp >= val ? 0 : 1; + dst[i - 1] = cpu_to_be64(tmp); + } + + return val; +} + +static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime, + unsigned int *key_size) +{ + unsigned int n, oversampling_size; + __be64 *key; + int err; + u64 h, o; + + /* + * Generate a private key following NIST SP800-56Ar3, + * sec. 5.6.1.1.1 and 5.6.1.1.3 resp.. + * + * 5.6.1.1.1: choose key length N such that + * 2 * ->max_strength <= N <= log2(q) + 1 = ->p_size * 8 - 1 + * with q = (p - 1) / 2 for the safe-prime groups. + * Choose the lower bound's next power of two for N in order to + * avoid excessively large private keys while still + * maintaining some extra reserve beyond the bare minimum in + * most cases. Note that for each entry in safe_prime_groups[], + * the following holds for such N: + * - N >= 256, in particular it is a multiple of 2^6 = 64 + * bits and + * - N < log2(q) + 1, i.e. N respects the upper bound. + */ + n = roundup_pow_of_two(2 * safe_prime->max_strength); + WARN_ON_ONCE(n & ((1u << 6) - 1)); + n >>= 6; /* Convert N into units of u64. */ + + /* + * Reserve one extra u64 to hold the extra random bits + * required as per 5.6.1.1.3. + */ + oversampling_size = (n + 1) * sizeof(__be64); + key = kmalloc(oversampling_size, GFP_KERNEL); + if (!key) + return ERR_PTR(-ENOMEM); + + /* + * 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64 + * random bits and interpret them as a big endian integer. 
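+ * For example, under an assumed group strength of 112 bits,
+ * 2 * 112 = 224 rounds up to N = 256, i.e. n = 4 u64 words, so
+ * oversampling_size = (4 + 1) * 8 = 40 bytes = N + 64 bits of
+ * random input.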
+	 */
+	err = -EFAULT;
+	if (crypto_get_default_rng())
+		goto out_err;
+
+	err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key,
+				   oversampling_size);
+	crypto_put_default_rng();
+	if (err)
+		goto out_err;
+
+	/*
+	 * 5.6.1.1.3, step 5 is implicit: 2^N < q and thus,
+	 * M = min(2^N, q) = 2^N.
+	 *
+	 * For step 6, calculate
+	 * key = (key[] mod (M - 1)) + 1 = (key[] mod (2^N - 1)) + 1.
+	 *
+	 * In order to avoid expensive divisions, note that
+	 * 2^N mod (2^N - 1) = 1 and thus, for any integer h,
+	 * 2^N * h mod (2^N - 1) = h mod (2^N - 1) always holds.
+	 * The big endian integer key[] composed of n + 1 64-bit words
+	 * may be written as key[] = h * 2^N + l, with h = key[0]
+	 * representing the 64 most significant bits and l
+	 * corresponding to the remaining N bits. With the remark
+	 * from above,
+	 * h * 2^N + l mod (2^N - 1) = l + h mod (2^N - 1).
+	 * As both l and h are less than 2^N, their sum after
+	 * this first reduction is guaranteed to be <= 2^(N + 1) - 2.
+	 * Equivalently, their sum can again be written as
+	 * h' * 2^N + l' with h' now either zero or one and, if one,
+	 * then l' <= 2^N - 2. Thus, all bits at positions >= N will
+	 * be zero after a second reduction:
+	 * h' * 2^N + l' mod (2^N - 1) = l' + h' mod (2^N - 1).
+	 * At this point, it is still possible that
+	 * l' + h' = 2^N - 1, i.e. that l' + h' mod (2^N - 1)
+	 * is zero. This condition will be detected below by means of
+	 * the final increment overflowing in this case.
+	 */
+	h = be64_to_cpu(key[0]);
+	h = __add_u64_to_be(key + 1, n, h);
+	h = __add_u64_to_be(key + 1, n, h);
+	WARN_ON_ONCE(h);
+
+	/* Increment to obtain the final result. */
+	o = __add_u64_to_be(key + 1, n, 1);
+	/*
+	 * The overflow bit o from the increment is either zero or
+	 * one. If zero, key[1:n] holds the final result in big-endian
+	 * order. If one, key[1:n] is zero now, but needs to be set to
+	 * one, cf. above.
+	 */
+	if (o)
+		key[n] = cpu_to_be64(1);
+
+	/* n is in units of u64, convert to bytes. */
+	*key_size = n << 3;
+	/* Strip the leading extra __be64, which is (virtually) zero by now. */
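+	/*
+	 * The two folds above absorbed key[0]'s value h into key[1:n],
+	 * and the increment's carry was handled via o, so dropping the
+	 * leading word below loses no information.
+	 */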
+	memmove(key, &key[1], *key_size);
+
+	return key;
+
+out_err:
+	kfree_sensitive(key);
+	return ERR_PTR(err);
+}
+
+static int dh_safe_prime_set_secret(struct crypto_kpp *tfm, const void *buffer,
+				    unsigned int len)
+{
+	struct dh_safe_prime_instance_ctx *inst_ctx =
+		dh_safe_prime_instance_ctx(tfm);
+	struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
+	struct dh params = {};
+	void *buf = NULL, *key = NULL;
+	unsigned int buf_size;
+	int err;
+
+	if (buffer) {
+		err = __crypto_dh_decode_key(buffer, len, &params);
+		if (err)
+			return err;
+		if (params.p_size || params.g_size)
+			return -EINVAL;
+	}
+
+	params.p = inst_ctx->safe_prime->p;
+	params.p_size = inst_ctx->safe_prime->p_size;
+	params.g = safe_prime_g;
+	params.g_size = sizeof(safe_prime_g);
+
+	if (!params.key_size) {
+		key = dh_safe_prime_gen_privkey(inst_ctx->safe_prime,
+						&params.key_size);
+		if (IS_ERR(key))
+			return PTR_ERR(key);
+		params.key = key;
+	}
+
+	buf_size = crypto_dh_key_len(&params);
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = crypto_dh_encode_key(buf, buf_size, &params);
+	if (err)
+		goto out;
+
+	err = crypto_kpp_set_secret(tfm_ctx->dh_tfm, buf, buf_size);
+out:
+	kfree_sensitive(buf);
+	kfree_sensitive(key);
+	return err;
+}
+
+static void dh_safe_prime_complete_req(void *data, int err)
+{
+	struct kpp_request *req = data;
+
+	kpp_request_complete(req, err);
+}
+
+static struct kpp_request *dh_safe_prime_prepare_dh_req(struct kpp_request *req)
+{
+	struct dh_safe_prime_tfm_ctx *tfm_ctx =
+		kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+	struct kpp_request *dh_req = kpp_request_ctx(req);
+
+	kpp_request_set_tfm(dh_req, tfm_ctx->dh_tfm);
+	kpp_request_set_callback(dh_req, req->base.flags,
+				 dh_safe_prime_complete_req, req);
+
+	kpp_request_set_input(dh_req, req->src, req->src_len);
+	kpp_request_set_output(dh_req, req->dst, req->dst_len);
+
+	return dh_req;
+}
+
+static int dh_safe_prime_generate_public_key(struct kpp_request *req)
+{
+	struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req);
+
+	return crypto_kpp_generate_public_key(dh_req);
+}
+
+static int dh_safe_prime_compute_shared_secret(struct kpp_request *req)
+{
+	struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req);
+
+	return crypto_kpp_compute_shared_secret(dh_req);
+}
+
+static unsigned int dh_safe_prime_max_size(struct crypto_kpp *tfm)
+{
+	struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
+
+	return crypto_kpp_maxsize(tfm_ctx->dh_tfm);
+}
+
+static int __maybe_unused __dh_safe_prime_create(
+	struct crypto_template *tmpl, struct rtattr **tb,
+	const struct dh_safe_prime *safe_prime)
+{
+	struct kpp_instance *inst;
+	struct dh_safe_prime_instance_ctx *ctx;
+	const char *dh_name;
+	struct kpp_alg *dh_alg;
+	u32 mask;
+	int err;
+
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_KPP, &mask);
+	if (err)
+		return err;
+
+	dh_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(dh_name))
+		return PTR_ERR(dh_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	ctx = kpp_instance_ctx(inst);
+
+	err = crypto_grab_kpp(&ctx->dh_spawn, kpp_crypto_instance(inst),
+			      dh_name, 0, mask);
+	if (err)
+		goto err_free_inst;
+
+	err = -EINVAL;
+	dh_alg = crypto_spawn_kpp_alg(&ctx->dh_spawn);
+	if (strcmp(dh_alg->base.cra_name, "dh"))
+		goto err_free_inst;
+
+	ctx->safe_prime = safe_prime;
+
+	err = crypto_inst_setname(kpp_crypto_instance(inst),
+				  tmpl->name, &dh_alg->base);
+	if (err)
+		goto err_free_inst;
+
+	inst->alg.set_secret = dh_safe_prime_set_secret;
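+	/*
+	 * Usage aside (an illustration, not part of this patch): once
+	 * the templates below are registered, an instance is created by
+	 * name, e.g.
+	 *
+	 *	struct crypto_kpp *tfm =
+	 *		crypto_alloc_kpp("ffdhe2048(dh)", 0, 0);
+	 *
+	 * which ends up here in __dh_safe_prime_create() with the
+	 * matching safe-prime group passed as @safe_prime.
+	 */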
+ inst->alg.generate_public_key = dh_safe_prime_generate_public_key; + inst->alg.compute_shared_secret = dh_safe_prime_compute_shared_secret; + inst->alg.max_size = dh_safe_prime_max_size; + inst->alg.init = dh_safe_prime_init_tfm; + inst->alg.exit = dh_safe_prime_exit_tfm; + inst->alg.base.cra_priority = dh_alg->base.cra_priority; + inst->alg.base.cra_module = THIS_MODULE; + inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx); + + inst->free = dh_safe_prime_free_instance; + + err = kpp_register_instance(tmpl, inst); + if (err) + goto err_free_inst; + + return 0; + +err_free_inst: + dh_safe_prime_free_instance(inst); + + return err; +} + +#ifdef CONFIG_CRYPTO_DH_RFC7919_GROUPS + +static const struct dh_safe_prime ffdhe2048_prime = { + .max_strength = 112, + .p_size = 256, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x28\x5c\x97\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe3072_prime = { + .max_strength = 128, + .p_size = 384, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + 
"\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\xc6\x2e\x37\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe4096_prime = { + .max_strength = 152, + .p_size = 512, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" + "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" + "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" + "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" + "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" + "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" + "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" + "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" + "\xc6\x8a\x00\x7e\x5e\x65\x5f\x6a\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe6144_prime = { + .max_strength = 176, + .p_size = 768, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + 
"\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" + "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" + "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" + "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" + "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" + "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" + "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" + "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" + "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a" + "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6" + "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c" + "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71" + "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77" + "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8" + "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e" + "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4" + "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92" + "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82" + "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c" + "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46" + "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17" + "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04" + "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69" + "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4" + "\xa4\x0e\x32\x9c\xd0\xe4\x0e\x65\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static const struct dh_safe_prime ffdhe8192_prime = { + .max_strength = 200, + .p_size = 1024, + .p = + "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" + "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" + "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" + "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" + "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" + "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" + "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" + "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" + "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" + "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" + "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" + "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" + "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" + "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" + "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" + 
"\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" + "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" + "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" + "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" + "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" + "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" + "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" + "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" + "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" + "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" + "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" + "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" + "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" + "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" + "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" + "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" + "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a" + "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6" + "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c" + "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71" + "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77" + "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8" + "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e" + "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4" + "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92" + "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82" + "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c" + "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46" + "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17" + "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04" + "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69" + "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4" + "\xa4\x0e\x32\x9c\xcf\xf4\x6a\xaa\x36\xad\x00\x4c\xf6\x00\xc8\x38" + "\x1e\x42\x5a\x31\xd9\x51\xae\x64\xfd\xb2\x3f\xce\xc9\x50\x9d\x43" + "\x68\x7f\xeb\x69\xed\xd1\xcc\x5e\x0b\x8c\xc3\xbd\xf6\x4b\x10\xef" + "\x86\xb6\x31\x42\xa3\xab\x88\x29\x55\x5b\x2f\x74\x7c\x93\x26\x65" + "\xcb\x2c\x0f\x1c\xc0\x1b\xd7\x02\x29\x38\x88\x39\xd2\xaf\x05\xe4" + "\x54\x50\x4a\xc7\x8b\x75\x82\x82\x28\x46\xc0\xba\x35\xc3\x5f\x5c" + "\x59\x16\x0c\xc0\x46\xfd\x82\x51\x54\x1f\xc6\x8c\x9c\x86\xb0\x22" + "\xbb\x70\x99\x87\x6a\x46\x0e\x74\x51\xa8\xa9\x31\x09\x70\x3f\xee" + "\x1c\x21\x7e\x6c\x38\x26\xe5\x2c\x51\xaa\x69\x1e\x0e\x42\x3c\xfc" + "\x99\xe9\xe3\x16\x50\xc1\x21\x7b\x62\x48\x16\xcd\xad\x9a\x95\xf9" + "\xd5\xb8\x01\x94\x88\xd9\xc0\xa0\xa1\xfe\x30\x75\xa5\x77\xe2\x31" + "\x83\xf8\x1d\x4a\x3f\x2f\xa4\x57\x1e\xfc\x8c\xe0\xba\x8a\x4f\xe8" + "\xb6\x85\x5d\xfe\x72\xb0\xa6\x6e\xde\xd2\xfb\xab\xfb\xe5\x8a\x30" + "\xfa\xfa\xbe\x1c\x5d\x71\xa8\x7e\x2f\x74\x1e\xf8\xc1\xfe\x86\xfe" + "\xa6\xbb\xfd\xe5\x30\x67\x7f\x0d\x97\xd1\x1d\x49\xf7\xa8\x44\x3d" + "\x08\x22\xe5\x06\xa9\xf4\x61\x4e\x01\x1e\x2a\x94\x83\x8f\xf8\x8c" + "\xd6\x8c\x8b\xb7\xc5\xc6\x42\x4c\xff\xff\xff\xff\xff\xff\xff\xff", +}; + +static int dh_ffdhe2048_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + return __dh_safe_prime_create(tmpl, tb, &ffdhe2048_prime); +} + +static int 
dh_ffdhe3072_create(struct crypto_template *tmpl,
+			       struct rtattr **tb)
+{
+	return __dh_safe_prime_create(tmpl, tb, &ffdhe3072_prime);
+}
+
+static int dh_ffdhe4096_create(struct crypto_template *tmpl,
+			       struct rtattr **tb)
+{
+	return __dh_safe_prime_create(tmpl, tb, &ffdhe4096_prime);
+}
+
+static int dh_ffdhe6144_create(struct crypto_template *tmpl,
+			       struct rtattr **tb)
+{
+	return __dh_safe_prime_create(tmpl, tb, &ffdhe6144_prime);
+}
+
+static int dh_ffdhe8192_create(struct crypto_template *tmpl,
+			       struct rtattr **tb)
+{
+	return __dh_safe_prime_create(tmpl, tb, &ffdhe8192_prime);
+}
+
+static struct crypto_template crypto_ffdhe_templates[] = {
+	{
+		.name = "ffdhe2048",
+		.create = dh_ffdhe2048_create,
+		.module = THIS_MODULE,
+	},
+	{
+		.name = "ffdhe3072",
+		.create = dh_ffdhe3072_create,
+		.module = THIS_MODULE,
+	},
+	{
+		.name = "ffdhe4096",
+		.create = dh_ffdhe4096_create,
+		.module = THIS_MODULE,
+	},
+	{
+		.name = "ffdhe6144",
+		.create = dh_ffdhe6144_create,
+		.module = THIS_MODULE,
+	},
+	{
+		.name = "ffdhe8192",
+		.create = dh_ffdhe8192_create,
+		.module = THIS_MODULE,
+	},
+};
+
+#else /* ! CONFIG_CRYPTO_DH_RFC7919_GROUPS */
+
+static struct crypto_template crypto_ffdhe_templates[] = {};
+
+#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
+
+
+static int __init dh_init(void)
 {
-	return crypto_register_kpp(&dh);
+	int err;
+
+	err = crypto_register_kpp(&dh);
+	if (err)
+		return err;
+
+	err = crypto_register_templates(crypto_ffdhe_templates,
+					ARRAY_SIZE(crypto_ffdhe_templates));
+	if (err) {
+		crypto_unregister_kpp(&dh);
+		return err;
+	}
+
+	return 0;
 }
 
-static void dh_exit(void)
+static void __exit dh_exit(void)
 {
+	crypto_unregister_templates(crypto_ffdhe_templates,
+				    ARRAY_SIZE(crypto_ffdhe_templates));
 	crypto_unregister_kpp(&dh);
 }
 
-subsys_initcall(dh_init);
+module_init(dh_init);
 module_exit(dh_exit);
 
 MODULE_ALIAS_CRYPTO("dh");
 MODULE_LICENSE("GPL");
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 9fd5a42eea15..2d499879328b 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -10,7 +10,7 @@
 #include <crypto/dh.h>
 #include <crypto/kpp.h>
 
-#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
 
 static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
 {
@@ -28,7 +28,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
 
 static inline unsigned int dh_data_size(const struct dh *p)
 {
-	return p->key_size + p->p_size + p->q_size + p->g_size;
+	return p->key_size + p->p_size + p->g_size;
 }
 
 unsigned int crypto_dh_key_len(const struct dh *p)
@@ -53,11 +53,9 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
 
 	ptr = dh_pack_data(ptr, end, &params->key_size, sizeof(params->key_size));
 	ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
-	ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
 	ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
 	ptr = dh_pack_data(ptr, end, params->key, params->key_size);
 	ptr = dh_pack_data(ptr, end, params->p, params->p_size);
-	ptr = dh_pack_data(ptr, end, params->q, params->q_size);
 	ptr = dh_pack_data(ptr, end, params->g, params->g_size);
 	if (ptr != end)
 		return -EINVAL;
@@ -65,7 +63,7 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
 }
 EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
 
-int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+int __crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 {
 	const u8 *ptr = buf;
 	struct kpp_secret secret;
@@ -79,28 +77,36 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 
 	ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
 	ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
-	ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
 	ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
 	if (secret.len != crypto_dh_key_len(params))
 		return -EINVAL;
 
+	/* Don't allocate memory. Set pointers to data within
+	 * the given buffer
+	 */
+	params->key = (void *)ptr;
+	params->p = (void *)(ptr + params->key_size);
+	params->g = (void *)(ptr + params->key_size + params->p_size);
+
+	return 0;
+}
+
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+{
+	int err;
+
+	err = __crypto_dh_decode_key(buf, len, params);
+	if (err)
+		return err;
+
 	/*
 	 * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
 	 * some drivers assume otherwise.
 	 */
 	if (params->key_size > params->p_size ||
-	    params->g_size > params->p_size || params->q_size > params->p_size)
+	    params->g_size > params->p_size)
 		return -EINVAL;
 
-	/* Don't allocate memory. Set pointers to data within
-	 * the given buffer
-	 */
-	params->key = (void *)ptr;
-	params->p = (void *)(ptr + params->key_size);
-	params->q = (void *)(ptr + params->key_size + params->p_size);
-	params->g = (void *)(ptr + params->key_size + params->p_size +
-			     params->q_size);
-
 	/*
 	 * Don't permit 'p' to be 0. It's not a prime number, and it's subject
 	 * to corner cases such as 'mod 0' being undefined or
@@ -109,10 +115,6 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	if (memchr_inv(params->p, 0, params->p_size) == NULL)
 		return -EINVAL;
 
-	/* It is permissible to not provide Q. */
-	if (params->q_size == 0)
-		params->q = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index ea85d4a0fe9e..1d433dae9955 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,8 +98,11 @@
  */
 
 #include <crypto/drbg.h>
+#include <crypto/df_sp80090a.h>
 #include <crypto/internal/cipher.h>
 #include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string_choices.h>
 
 /***************************************************************
  * Backend cipher definitions available to DRBG
@@ -110,9 +113,9 @@
  * as stdrng. Each DRBG receives an increasing cra_priority values the later
  * they are defined in this array (see drbg_fill_array).
  *
- * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
- * the SHA256 / AES 256 over other ciphers. Thus, the favored
- * DRBGs are the latest entries in this array.
+ * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and the
+ * HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the
+ * favored DRBGs are the latest entries in this array.
*/ static const struct drbg_core drbg_cores[] = { #ifdef CONFIG_CRYPTO_DRBG_CTR @@ -138,12 +141,6 @@ static const struct drbg_core drbg_cores[] = { #endif /* CONFIG_CRYPTO_DRBG_CTR */ #ifdef CONFIG_CRYPTO_DRBG_HASH { - .flags = DRBG_HASH | DRBG_STRENGTH128, - .statelen = 55, /* 440 bits */ - .blocklen_bytes = 20, - .cra_name = "sha1", - .backend_cra_name = "sha1", - }, { .flags = DRBG_HASH | DRBG_STRENGTH256, .statelen = 111, /* 888 bits */ .blocklen_bytes = 48, @@ -165,12 +162,6 @@ static const struct drbg_core drbg_cores[] = { #endif /* CONFIG_CRYPTO_DRBG_HASH */ #ifdef CONFIG_CRYPTO_DRBG_HMAC { - .flags = DRBG_HMAC | DRBG_STRENGTH128, - .statelen = 20, /* block length of cipher */ - .blocklen_bytes = 20, - .cra_name = "hmac_sha1", - .backend_cra_name = "hmac(sha1)", - }, { .flags = DRBG_HMAC | DRBG_STRENGTH256, .statelen = 48, /* block length of cipher */ .blocklen_bytes = 48, @@ -271,26 +262,6 @@ static int drbg_fips_continuous_test(struct drbg_state *drbg, return 0; } -/* - * Convert an integer into a byte representation of this integer. - * The byte representation is big-endian - * - * @val value to be converted - * @buf buffer holding the converted integer -- caller must ensure that - * buffer size is at least 32 bit - */ -#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR)) -static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf) -{ - struct s { - __be32 conv; - }; - struct s *conversion = (struct s *) buf; - - conversion->conv = cpu_to_be32(val); -} -#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */ - /****************************************************************** * CTR DRBG callback functions ******************************************************************/ @@ -304,10 +275,6 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192"); MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128"); MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128"); -static void drbg_kcapi_symsetkey(struct drbg_state *drbg, - const unsigned char *key); -static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, - const struct drbg_string *in); static int drbg_init_sym_kernel(struct drbg_state *drbg); static int drbg_fini_sym_kernel(struct drbg_state *drbg); static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, @@ -315,202 +282,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen); #define DRBG_OUTSCRATCHLEN 256 -/* BCC function for CTR DRBG as defined in 10.4.3 */ -static int drbg_ctr_bcc(struct drbg_state *drbg, - unsigned char *out, const unsigned char *key, - struct list_head *in) -{ - int ret = 0; - struct drbg_string *curr = NULL; - struct drbg_string data; - short cnt = 0; - - drbg_string_fill(&data, out, drbg_blocklen(drbg)); - - /* 10.4.3 step 2 / 4 */ - drbg_kcapi_symsetkey(drbg, key); - list_for_each_entry(curr, in, list) { - const unsigned char *pos = curr->buf; - size_t len = curr->len; - /* 10.4.3 step 4.1 */ - while (len) { - /* 10.4.3 step 4.2 */ - if (drbg_blocklen(drbg) == cnt) { - cnt = 0; - ret = drbg_kcapi_sym(drbg, out, &data); - if (ret) - return ret; - } - out[cnt] ^= *pos; - pos++; - cnt++; - len--; - } - } - /* 10.4.3 step 4.2 for last block */ - if (cnt) - ret = drbg_kcapi_sym(drbg, out, &data); - - return ret; -} - -/* - * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df - * (and drbg_ctr_bcc, but this function does not need any temporary buffers), - * the scratchpad is used as follows: - * drbg_ctr_update: - * temp - * start: drbg->scratchpad - * length: drbg_statelen(drbg) + 
drbg_blocklen(drbg) - * note: the cipher writing into this variable works - * blocklen-wise. Now, when the statelen is not a multiple - * of blocklen, the generateion loop below "spills over" - * by at most blocklen. Thus, we need to give sufficient - * memory. - * df_data - * start: drbg->scratchpad + - * drbg_statelen(drbg) + drbg_blocklen(drbg) - * length: drbg_statelen(drbg) - * - * drbg_ctr_df: - * pad - * start: df_data + drbg_statelen(drbg) - * length: drbg_blocklen(drbg) - * iv - * start: pad + drbg_blocklen(drbg) - * length: drbg_blocklen(drbg) - * temp - * start: iv + drbg_blocklen(drbg) - * length: drbg_satelen(drbg) + drbg_blocklen(drbg) - * note: temp is the buffer that the BCC function operates - * on. BCC operates blockwise. drbg_statelen(drbg) - * is sufficient when the DRBG state length is a multiple - * of the block size. For AES192 (and maybe other ciphers) - * this is not correct and the length for temp is - * insufficient (yes, that also means for such ciphers, - * the final output of all BCC rounds are truncated). - * Therefore, add drbg_blocklen(drbg) to cover all - * possibilities. - */ - -/* Derivation Function for CTR DRBG as defined in 10.4.2 */ static int drbg_ctr_df(struct drbg_state *drbg, unsigned char *df_data, size_t bytes_to_return, struct list_head *seedlist) { - int ret = -EFAULT; - unsigned char L_N[8]; - /* S3 is input */ - struct drbg_string S1, S2, S4, cipherin; - LIST_HEAD(bcc_list); - unsigned char *pad = df_data + drbg_statelen(drbg); - unsigned char *iv = pad + drbg_blocklen(drbg); - unsigned char *temp = iv + drbg_blocklen(drbg); - size_t padlen = 0; - unsigned int templen = 0; - /* 10.4.2 step 7 */ - unsigned int i = 0; - /* 10.4.2 step 8 */ - const unsigned char *K = (unsigned char *) - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"; - unsigned char *X; - size_t generated_len = 0; - size_t inputlen = 0; - struct drbg_string *seed = NULL; - - memset(pad, 0, drbg_blocklen(drbg)); - memset(iv, 0, drbg_blocklen(drbg)); - - /* 10.4.2 step 1 is implicit as we work byte-wise */ - - /* 10.4.2 step 2 */ - if ((512/8) < bytes_to_return) - return -EINVAL; - - /* 10.4.2 step 2 -- calculate the entire length of all input data */ - list_for_each_entry(seed, seedlist, list) - inputlen += seed->len; - drbg_cpu_to_be32(inputlen, &L_N[0]); - - /* 10.4.2 step 3 */ - drbg_cpu_to_be32(bytes_to_return, &L_N[4]); - - /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */ - padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg)); - /* wrap the padlen appropriately */ - if (padlen) - padlen = drbg_blocklen(drbg) - padlen; - /* - * pad / padlen contains the 0x80 byte and the following zero bytes. - * As the calculated padlen value only covers the number of zero - * bytes, this value has to be incremented by one for the 0x80 byte. 
- */ - padlen++; - pad[0] = 0x80; - - /* 10.4.2 step 4 -- first fill the linked list and then order it */ - drbg_string_fill(&S1, iv, drbg_blocklen(drbg)); - list_add_tail(&S1.list, &bcc_list); - drbg_string_fill(&S2, L_N, sizeof(L_N)); - list_add_tail(&S2.list, &bcc_list); - list_splice_tail(seedlist, &bcc_list); - drbg_string_fill(&S4, pad, padlen); - list_add_tail(&S4.list, &bcc_list); - - /* 10.4.2 step 9 */ - while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) { - /* - * 10.4.2 step 9.1 - the padding is implicit as the buffer - * holds zeros after allocation -- even the increment of i - * is irrelevant as the increment remains within length of i - */ - drbg_cpu_to_be32(i, iv); - /* 10.4.2 step 9.2 -- BCC and concatenation with temp */ - ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list); - if (ret) - goto out; - /* 10.4.2 step 9.3 */ - i++; - templen += drbg_blocklen(drbg); - } - - /* 10.4.2 step 11 */ - X = temp + (drbg_keylen(drbg)); - drbg_string_fill(&cipherin, X, drbg_blocklen(drbg)); - - /* 10.4.2 step 12: overwriting of outval is implemented in next step */ - - /* 10.4.2 step 13 */ - drbg_kcapi_symsetkey(drbg, temp); - while (generated_len < bytes_to_return) { - short blocklen = 0; - /* - * 10.4.2 step 13.1: the truncation of the key length is - * implicit as the key is only drbg_blocklen in size based on - * the implementation of the cipher function callback - */ - ret = drbg_kcapi_sym(drbg, X, &cipherin); - if (ret) - goto out; - blocklen = (drbg_blocklen(drbg) < - (bytes_to_return - generated_len)) ? - drbg_blocklen(drbg) : - (bytes_to_return - generated_len); - /* 10.4.2 step 13.2 and 14 */ - memcpy(df_data + generated_len, X, blocklen); - generated_len += blocklen; - } - - ret = 0; - -out: - memset(iv, 0, drbg_blocklen(drbg)); - memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); - memset(pad, 0, drbg_blocklen(drbg)); - return ret; + return crypto_drbg_ctr_df(drbg->priv_data, df_data, drbg_statelen(drbg), + seedlist, drbg_blocklen(drbg), drbg_statelen(drbg)); } /* @@ -647,8 +424,6 @@ MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384"); MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384"); MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256"); MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256"); -MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1"); -MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1"); /* update function of HMAC DRBG as defined in 10.1.2.2 */ static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed, @@ -767,8 +542,6 @@ MODULE_ALIAS_CRYPTO("drbg_pr_sha384"); MODULE_ALIAS_CRYPTO("drbg_nopr_sha384"); MODULE_ALIAS_CRYPTO("drbg_pr_sha256"); MODULE_ALIAS_CRYPTO("drbg_nopr_sha256"); -MODULE_ALIAS_CRYPTO("drbg_pr_sha1"); -MODULE_ALIAS_CRYPTO("drbg_nopr_sha1"); /* * Increment buffer @@ -1036,17 +809,39 @@ static const struct drbg_state_ops drbg_hash_ops = { ******************************************************************/ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, - int reseed) + int reseed, enum drbg_seed_state new_seed_state) { int ret = drbg->d_ops->update(drbg, seed, reseed); if (ret) return ret; - drbg->seeded = true; + drbg->seeded = new_seed_state; + drbg->last_seed_time = jiffies; /* 10.1.1.2 / 10.1.1.3 step 5 */ drbg->reseed_ctr = 1; + switch (drbg->seeded) { + case DRBG_SEED_STATE_UNSEEDED: + /* Impossible, but handle it to silence compiler warnings. */ + fallthrough; + case DRBG_SEED_STATE_PARTIAL: + /* + * Require frequent reseeds until the seed source is + * fully initialized. 
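+		 * (A threshold of 50 is far below drbg_max_requests(),
+		 * forcing a reseed attempt roughly every 50 generate
+		 * requests until the seed source is ready.)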
+ */ + drbg->reseed_threshold = 50; + break; + + case DRBG_SEED_STATE_FULL: + /* + * Seed source has become fully initialized, frequent + * reseeds no longer required. + */ + drbg->reseed_threshold = drbg_max_requests(drbg); + break; + } + return ret; } @@ -1066,12 +861,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg, return 0; } -static void drbg_async_seed(struct work_struct *work) +static int drbg_seed_from_random(struct drbg_state *drbg) { struct drbg_string data; LIST_HEAD(seedlist); - struct drbg_state *drbg = container_of(work, struct drbg_state, - seed_work); unsigned int entropylen = drbg_sec_strength(drbg->core->flags); unsigned char entropy[32]; int ret; @@ -1082,26 +875,35 @@ static void drbg_async_seed(struct work_struct *work) drbg_string_fill(&data, entropy, entropylen); list_add_tail(&data.list, &seedlist); - mutex_lock(&drbg->drbg_mutex); - ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) - goto unlock; + goto out; - /* Set seeded to false so that if __drbg_seed fails the - * next generate call will trigger a reseed. - */ - drbg->seeded = false; + ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL); - __drbg_seed(drbg, &seedlist, true); +out: + memzero_explicit(entropy, entropylen); + return ret; +} - if (drbg->seeded) - drbg->reseed_threshold = drbg_max_requests(drbg); +static bool drbg_nopr_reseed_interval_elapsed(struct drbg_state *drbg) +{ + unsigned long next_reseed; -unlock: - mutex_unlock(&drbg->drbg_mutex); + /* Don't ever reseed from get_random_bytes() in test mode. */ + if (list_empty(&drbg->test_data.list)) + return false; - memzero_explicit(entropy, entropylen); + /* + * Obtain fresh entropy for the nopr DRBGs after 300s have + * elapsed in order to still achieve sort of partial + * prediction resistance over the time domain at least. Note + * that the period of 300s has been chosen to match the + * CRNG_RESEED_INTERVAL of the get_random_bytes()' chacha + * rngs. + */ + next_reseed = drbg->last_seed_time + 300 * HZ; + return time_after(jiffies, next_reseed); } /* @@ -1123,6 +925,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, unsigned int entropylen = drbg_sec_strength(drbg->core->flags); struct drbg_string data1; LIST_HEAD(seedlist); + enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL; /* 9.1 / 9.2 / 9.3.1 step 3 */ if (pers && pers->len > (drbg_max_addtl(drbg))) { @@ -1150,6 +953,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, BUG_ON((entropylen * 2) > sizeof(entropy)); /* Get seed from in-kernel /dev/urandom */ + if (!rng_is_initialized()) + new_seed_state = DRBG_SEED_STATE_PARTIAL; + ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) goto out; @@ -1159,11 +965,14 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, pr_devel("DRBG: (re)seeding with %u bytes of entropy\n", entropylen); } else { - /* Get seed from Jitter RNG */ + /* + * Get seed from Jitter RNG, failures are + * fatal only in FIPS mode. 
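+		 * Outside FIPS mode the entropylen bytes already obtained
+		 * from the in-kernel RNG above still seed the DRBG, so a
+		 * Jitter RNG failure merely drops this additional source.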
+ */ ret = crypto_rng_get_bytes(drbg->jent, entropy + entropylen, entropylen); - if (ret) { + if (fips_enabled && ret) { pr_devel("DRBG: jent failed with %d\n", ret); /* @@ -1206,7 +1015,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, memset(drbg->C, 0, drbg_statelen(drbg)); } - ret = __drbg_seed(drbg, &seedlist, reseed); + ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state); out: memzero_explicit(entropy, entropylen * 2); @@ -1288,10 +1097,8 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) sb_size = 0; else if (drbg->core->flags & DRBG_CTR) sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */ - drbg_statelen(drbg) + /* df_data */ - drbg_blocklen(drbg) + /* pad */ - drbg_blocklen(drbg) + /* iv */ - drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */ + crypto_drbg_ctr_df_datalen(drbg_statelen(drbg), + drbg_blocklen(drbg)); else sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg); @@ -1386,19 +1193,26 @@ static int drbg_generate(struct drbg_state *drbg, * here. The spec is a bit convoluted here, we make it simpler. */ if (drbg->reseed_threshold < drbg->reseed_ctr) - drbg->seeded = false; + drbg->seeded = DRBG_SEED_STATE_UNSEEDED; - if (drbg->pr || !drbg->seeded) { + if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", - drbg->pr ? "true" : "false", - drbg->seeded ? "seeded" : "unseeded"); + str_true_false(drbg->pr), + (drbg->seeded == DRBG_SEED_STATE_FULL ? + "seeded" : "unseeded")); /* 9.3.1 steps 7.1 through 7.3 */ len = drbg_seed(drbg, addtl, true); if (len) goto err; /* 9.3.1 step 7.4 */ addtl = NULL; + } else if (rng_is_initialized() && + (drbg->seeded == DRBG_SEED_STATE_PARTIAL || + drbg_nopr_reseed_interval_elapsed(drbg))) { + len = drbg_seed_from_random(drbg); + if (len) + goto err; } if (addtl && 0 < addtl->len) @@ -1431,11 +1245,11 @@ static int drbg_generate(struct drbg_state *drbg, int err = 0; pr_devel("DRBG: start to perform self test\n"); if (drbg->core->flags & DRBG_HMAC) - err = alg_test("drbg_pr_hmac_sha256", - "drbg_pr_hmac_sha256", 0, 0); + err = alg_test("drbg_pr_hmac_sha512", + "drbg_pr_hmac_sha512", 0, 0); else if (drbg->core->flags & DRBG_CTR) - err = alg_test("drbg_pr_ctr_aes128", - "drbg_pr_ctr_aes128", 0, 0); + err = alg_test("drbg_pr_ctr_aes256", + "drbg_pr_ctr_aes256", 0, 0); else err = alg_test("drbg_pr_sha256", "drbg_pr_sha256", 0, 0); @@ -1491,51 +1305,23 @@ static int drbg_generate_long(struct drbg_state *drbg, return 0; } -static void drbg_schedule_async_seed(struct random_ready_callback *rdy) -{ - struct drbg_state *drbg = container_of(rdy, struct drbg_state, - random_ready); - - schedule_work(&drbg->seed_work); -} - static int drbg_prepare_hrng(struct drbg_state *drbg) { - int err; - /* We do not need an HRNG in test mode. 
*/ if (list_empty(&drbg->test_data.list)) return 0; drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); + if (IS_ERR(drbg->jent)) { + const int err = PTR_ERR(drbg->jent); - INIT_WORK(&drbg->seed_work, drbg_async_seed); - - drbg->random_ready.owner = THIS_MODULE; - drbg->random_ready.func = drbg_schedule_async_seed; - - err = add_random_ready_callback(&drbg->random_ready); - - switch (err) { - case 0: - break; - - case -EALREADY: - err = 0; - fallthrough; - - default: - drbg->random_ready.func = NULL; - return err; + drbg->jent = NULL; + if (fips_enabled) + return err; + pr_info("DRBG: Continuing without Jitter RNG\n"); } - /* - * Require frequent reseeds until the seed source is fully - * initialized. - */ - drbg->reseed_threshold = 50; - - return err; + return 0; } /* @@ -1562,7 +1348,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, bool reseed = true; pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " - "%s\n", coreref, pr ? "enabled" : "disabled"); + "%s\n", coreref, str_enabled_disabled(pr)); mutex_lock(&drbg->drbg_mutex); /* 9.1 step 1 is implicit with the selected DRBG type */ @@ -1578,7 +1364,8 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (!drbg->core) { drbg->core = &drbg_cores[coreref]; drbg->pr = pr; - drbg->seeded = false; + drbg->seeded = DRBG_SEED_STATE_UNSEEDED; + drbg->last_seed_time = 0; drbg->reseed_threshold = drbg_max_requests(drbg); ret = drbg_alloc_state(drbg); @@ -1589,14 +1376,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (ret) goto free_everything; - if (IS_ERR(drbg->jent)) { - ret = PTR_ERR(drbg->jent); - drbg->jent = NULL; - if (fips_enabled || ret != -ENOENT) - goto free_everything; - pr_info("DRBG: Continuing without Jitter RNG\n"); - } - reseed = false; } @@ -1629,11 +1408,6 @@ free_everything: */ static int drbg_uninstantiate(struct drbg_state *drbg) { - if (drbg->random_ready.func) { - del_random_ready_callback(&drbg->random_ready); - cancel_work_sync(&drbg->seed_work); - } - if (!IS_ERR_OR_NULL(drbg->jent)) crypto_free_rng(drbg->jent); drbg->jent = NULL; @@ -1669,7 +1443,6 @@ static void drbg_kcapi_set_entropy(struct crypto_rng *tfm, #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC) struct sdesc { struct shash_desc shash; - char ctx[]; }; static int drbg_init_hash_kernel(struct drbg_state *drbg) @@ -1694,12 +1467,12 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) sdesc->shash.tfm = tfm; drbg->priv_data = sdesc; - return crypto_shash_alignmask(tfm); + return 0; } static int drbg_fini_hash_kernel(struct drbg_state *drbg) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; if (sdesc) { crypto_free_shash(sdesc->shash.tfm); kfree_sensitive(sdesc); @@ -1711,7 +1484,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg) static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, const unsigned char *key) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); } @@ -1719,7 +1492,7 @@ static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, const struct list_head *in) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; struct drbg_string *input = NULL; crypto_shash_init(&sdesc->shash); @@ -1732,10 +1505,9 @@ 
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, #ifdef CONFIG_CRYPTO_DRBG_CTR static int drbg_fini_sym_kernel(struct drbg_state *drbg) { - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; - if (tfm) - crypto_free_cipher(tfm); + struct crypto_aes_ctx *aesctx = (struct crypto_aes_ctx *)drbg->priv_data; + + kfree(aesctx); drbg->priv_data = NULL; if (drbg->ctr_handle) @@ -1754,20 +1526,16 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) static int drbg_init_sym_kernel(struct drbg_state *drbg) { - struct crypto_cipher *tfm; + struct crypto_aes_ctx *aesctx; struct crypto_skcipher *sk_tfm; struct skcipher_request *req; unsigned int alignmask; char ctr_name[CRYPTO_MAX_ALG_NAME]; - tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); - if (IS_ERR(tfm)) { - pr_info("DRBG: could not allocate cipher TFM handle: %s\n", - drbg->core->backend_cra_name); - return PTR_ERR(tfm); - } - BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); - drbg->priv_data = tfm; + aesctx = kzalloc(sizeof(*aesctx), GFP_KERNEL); + if (!aesctx) + return -ENOMEM; + drbg->priv_data = aesctx; if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) { @@ -1811,27 +1579,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return alignmask; } -static void drbg_kcapi_symsetkey(struct drbg_state *drbg, - const unsigned char *key) -{ - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; - - crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); -} - -static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, - const struct drbg_string *in) -{ - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; - - /* there is only component in *in */ - BUG_ON(in->len < drbg_blocklen(drbg)); - crypto_cipher_encrypt_one(tfm, outval, in->buf); - return 0; -} - static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *inbuf, u32 inlen, u8 *outbuf, u32 outlen) @@ -2003,7 +1750,7 @@ static inline int __init drbg_healthcheck_sanity(void) #define OUTBUFLEN 16 unsigned char buf[OUTBUFLEN]; struct drbg_state *drbg = NULL; - int ret = -EFAULT; + int ret; int rc = -EFAULT; bool pr = false; int coreref = 0; @@ -2015,11 +1762,13 @@ static inline int __init drbg_healthcheck_sanity(void) return 0; #ifdef CONFIG_CRYPTO_DRBG_CTR - drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr); -#elif defined CONFIG_CRYPTO_DRBG_HASH + drbg_convert_tfm_core("drbg_nopr_ctr_aes256", &coreref, &pr); +#endif +#ifdef CONFIG_CRYPTO_DRBG_HASH drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr); -#else - drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr); +#endif +#ifdef CONFIG_CRYPTO_DRBG_HMAC + drbg_convert_tfm_core("drbg_nopr_hmac_sha512", &coreref, &pr); #endif drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); @@ -2143,7 +1892,7 @@ static void __exit drbg_exit(void) crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } -subsys_initcall(drbg_init); +module_init(drbg_init); module_exit(drbg_exit); #ifndef CRYPTO_DRBG_HASH_STRING #define CRYPTO_DRBG_HASH_STRING "" @@ -2162,4 +1911,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) " CRYPTO_DRBG_HMAC_STRING CRYPTO_DRBG_CTR_STRING); MODULE_ALIAS_CRYPTO("stdrng"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/ecb.c b/crypto/ecb.c index 71fbb0543d64..cd1b20456dad 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -5,75 +5,198 @@ * 
Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> */ -#include <crypto/algapi.h> #include <crypto/internal/cipher.h> #include <crypto/internal/skcipher.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> -static int crypto_ecb_crypt(struct skcipher_request *req, - struct crypto_cipher *cipher, +static int crypto_ecb_crypt(struct crypto_cipher *cipher, const u8 *src, + u8 *dst, unsigned nbytes, bool final, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { const unsigned int bsize = crypto_cipher_blocksize(cipher); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes) != 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; + while (nbytes >= bsize) { + fn(crypto_cipher_tfm(cipher), dst, src); - do { - fn(crypto_cipher_tfm(cipher), dst, src); + src += bsize; + dst += bsize; - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - err = skcipher_walk_done(&walk, nbytes); + nbytes -= bsize; } - return err; + return nbytes && final ? -EINVAL : nbytes; } -static int crypto_ecb_encrypt(struct skcipher_request *req) +static int crypto_ecb_encrypt2(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, u32 flags) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; - return crypto_ecb_crypt(req, cipher, + return crypto_ecb_crypt(cipher, src, dst, len, + flags & CRYPTO_LSKCIPHER_FLAG_FINAL, crypto_cipher_alg(cipher)->cia_encrypt); } -static int crypto_ecb_decrypt(struct skcipher_request *req) +static int crypto_ecb_decrypt2(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, u32 flags) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; - return crypto_ecb_crypt(req, cipher, + return crypto_ecb_crypt(cipher, src, dst, len, + flags & CRYPTO_LSKCIPHER_FLAG_FINAL, crypto_cipher_alg(cipher)->cia_decrypt); } -static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) +static int lskcipher_setkey_simple2(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; + + crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_cipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple2(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher_spawn *spawn; + struct crypto_cipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + +static void lskcipher_exit_tfm_simple2(struct crypto_lskcipher *tfm) +{ + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_cipher(*ctx); +} + +static void lskcipher_free_instance_simple2(struct lskcipher_instance *inst) +{ + crypto_drop_cipher(lskcipher_instance_ctx(inst)); + 
kfree(inst); +} + +static struct lskcipher_instance *lskcipher_alloc_instance_simple2( + struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_cipher_spawn *spawn; + struct lskcipher_instance *inst; + struct crypto_alg *cipher_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + spawn = lskcipher_instance_ctx(inst); + + err = crypto_grab_cipher(spawn, lskcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + cipher_alg = crypto_spawn_cipher_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + cipher_alg); + if (err) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple2; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->cra_priority; + inst->alg.co.min_keysize = cipher_alg->cra_cipher.cia_min_keysize; + inst->alg.co.max_keysize = cipher_alg->cra_cipher.cia_max_keysize; + inst->alg.co.ivsize = cipher_alg->cra_blocksize; + + /* Use struct crypto_cipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_cipher *); + inst->alg.setkey = lskcipher_setkey_simple2; + inst->alg.init = lskcipher_init_tfm_simple2; + inst->alg.exit = lskcipher_exit_tfm_simple2; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple2(inst); + return ERR_PTR(err); +} + +static int crypto_ecb_create2(struct crypto_template *tmpl, struct rtattr **tb) { - struct skcipher_instance *inst; + struct lskcipher_instance *inst; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb); + inst = lskcipher_alloc_instance_simple2(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); - inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */ + /* ECB mode doesn't take an IV */ + inst->alg.co.ivsize = 0; + + inst->alg.encrypt = crypto_ecb_encrypt2; + inst->alg.decrypt = crypto_ecb_decrypt2; + + err = lskcipher_register_instance(tmpl, inst); + if (err) + inst->free(inst); + + return err; +} + +static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_lskcipher_spawn *spawn; + struct lskcipher_alg *cipher_alg; + struct lskcipher_instance *inst; + int err; + + inst = lskcipher_alloc_instance_simple(tmpl, tb); + if (IS_ERR(inst)) { + err = crypto_ecb_create2(tmpl, tb); + return err; + } + + spawn = lskcipher_instance_ctx(inst); + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + /* ECB mode doesn't take an IV */ + inst->alg.co.ivsize = 0; + if (cipher_alg->co.ivsize) + return -EINVAL; - inst->alg.encrypt = crypto_ecb_encrypt; - inst->alg.decrypt = crypto_ecb_decrypt; + inst->alg.co.base.cra_ctxsize = cipher_alg->co.base.cra_ctxsize; + inst->alg.setkey = cipher_alg->setkey; + inst->alg.encrypt = cipher_alg->encrypt; + inst->alg.decrypt = cipher_alg->decrypt; + inst->alg.init = cipher_alg->init; + inst->alg.exit = cipher_alg->exit; - err = skcipher_register_instance(tmpl, inst); + err = lskcipher_register_instance(tmpl, inst); if (err) inst->free(inst); @@ -96,9 +219,10 @@ static void __exit crypto_ecb_module_exit(void) crypto_unregister_template(&crypto_ecb_tmpl); } -subsys_initcall(crypto_ecb_module_init); +module_init(crypto_ecb_module_init); 
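(Aside, an illustration rather than part of the patch: the conversion above gives crypto_ecb_crypt() a simple contract — consume whole blocks, return the count of unprocessed tail bytes, and treat a leftover partial block as an error only on the final chunk. A self-contained toy version of that contract, with toy_encrypt_block() as a made-up stand-in for a real 16-byte block cipher:)

	/* Made-up stand-in for a real 16-byte block cipher primitive. */
	static void toy_encrypt_block(unsigned char *dst, const unsigned char *src)
	{
		int i;

		for (i = 0; i < 16; i++)
			dst[i] = src[i] ^ 0xaa;
	}

	/* Same contract as crypto_ecb_crypt(): leftover byte count, or -1. */
	static int toy_ecb_crypt(const unsigned char *src, unsigned char *dst,
				 unsigned int nbytes, int final)
	{
		const unsigned int bsize = 16;

		/* Process all complete blocks. */
		while (nbytes >= bsize) {
			toy_encrypt_block(dst, src);
			src += bsize;
			dst += bsize;
			nbytes -= bsize;
		}

		/* A partial trailing block is an error only on the final call. */
		return nbytes && final ? -1 : (int)nbytes;
	}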
module_exit(crypto_ecb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ECB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ecb"); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/ecc.c b/crypto/ecc.c index afc6cefdc1d9..6cf9a945fc6c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -32,10 +32,10 @@ #include <linux/fips.h> #include <crypto/ecdh.h> #include <crypto/rng.h> -#include <asm/unaligned.h> +#include <crypto/internal/ecc.h> +#include <linux/unaligned.h> #include <linux/ratelimit.h> -#include "ecc.h" #include "ecc_curve_defs.h" typedef struct { @@ -60,12 +60,36 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id) return &nist_p256; case ECC_CURVE_NIST_P384: return &nist_p384; + case ECC_CURVE_NIST_P521: + return &nist_p521; default: return NULL; } } EXPORT_SYMBOL(ecc_get_curve); +void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, + u64 *out, unsigned int ndigits) +{ + int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64)); + unsigned int o = nbytes & 7; + __be64 msd = 0; + + /* diff > 0: not enough input bytes: set most significant digits to 0 */ + if (diff > 0) { + ndigits -= diff; + memset(&out[ndigits], 0, diff * sizeof(u64)); + } + + if (o) { + memcpy((u8 *)&msd + sizeof(msd) - o, in, o); + out[--ndigits] = be64_to_cpu(msd); + in += o; + } + ecc_swap_digits(in, out, ndigits); +} +EXPORT_SYMBOL(ecc_digits_from_bytes); + static u64 *ecc_alloc_digits_space(unsigned int ndigits) { size_t len = ndigits * sizeof(u64); @@ -81,7 +105,7 @@ static void ecc_free_digits_space(u64 *space) kfree_sensitive(space); } -static struct ecc_point *ecc_alloc_point(unsigned int ndigits) +struct ecc_point *ecc_alloc_point(unsigned int ndigits) { struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -106,8 +130,9 @@ err_alloc_x: kfree(p); return NULL; } +EXPORT_SYMBOL(ecc_alloc_point); -static void ecc_free_point(struct ecc_point *p) +void ecc_free_point(struct ecc_point *p) { if (!p) return; @@ -116,6 +141,7 @@ static void ecc_free_point(struct ecc_point *p) kfree_sensitive(p->y); kfree_sensitive(p); } +EXPORT_SYMBOL(ecc_free_point); static void vli_clear(u64 *vli, unsigned int ndigits) { @@ -165,7 +191,7 @@ static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits) } /* Counts the number of bits required for vli. */ -static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) +unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) { unsigned int i, num_digits; u64 digit; @@ -180,6 +206,7 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) return ((num_digits - 1) * 64 + i); } +EXPORT_SYMBOL(vli_num_bits); /* Set dest from unaligned bit string src. 
*/ void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits) @@ -686,7 +713,7 @@ static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod, static void vli_mmod_fast_192(u64 *result, const u64 *product, const u64 *curve_prime, u64 *tmp) { - const unsigned int ndigits = 3; + const unsigned int ndigits = ECC_CURVE_NIST_P192_DIGITS; int carry; vli_set(result, product, ndigits); @@ -714,7 +741,7 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product, const u64 *curve_prime, u64 *tmp) { int carry; - const unsigned int ndigits = 4; + const unsigned int ndigits = ECC_CURVE_NIST_P256_DIGITS; /* t */ vli_set(result, product, ndigits); @@ -797,7 +824,7 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product, const u64 *curve_prime, u64 *tmp) { int carry; - const unsigned int ndigits = 6; + const unsigned int ndigits = ECC_CURVE_NIST_P384_DIGITS; /* t */ vli_set(result, product, ndigits); @@ -899,6 +926,28 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product, #undef AND64H #undef AND64L +/* + * Computes result = product % curve_prime + * from "Recommendations for Discrete Logarithm-Based Cryptography: + * Elliptic Curve Domain Parameters" section G.1.4 + */ +static void vli_mmod_fast_521(u64 *result, const u64 *product, + const u64 *curve_prime, u64 *tmp) +{ + const unsigned int ndigits = ECC_CURVE_NIST_P521_DIGITS; + size_t i; + + /* Initialize result with lowest 521 bits from product */ + vli_set(result, product, ndigits); + result[8] &= 0x1ff; + + for (i = 0; i < ndigits; i++) + tmp[i] = (product[8 + i] >> 9) | (product[9 + i] << 55); + tmp[8] &= 0x1ff; + + vli_mod_add(result, result, tmp, curve_prime, ndigits); +} + /* Computes result = product % curve_prime for different curve_primes. * * Note that curve_primes are distinguished just by heuristic check and @@ -929,15 +978,18 @@ static bool vli_mmod_fast(u64 *result, u64 *product, } switch (ndigits) { - case 3: + case ECC_CURVE_NIST_P192_DIGITS: vli_mmod_fast_192(result, product, curve_prime, tmp); break; - case 4: + case ECC_CURVE_NIST_P256_DIGITS: vli_mmod_fast_256(result, product, curve_prime, tmp); break; - case 6: + case ECC_CURVE_NIST_P384_DIGITS: vli_mmod_fast_384(result, product, curve_prime, tmp); break; + case ECC_CURVE_NIST_P521_DIGITS: + vli_mmod_fast_521(result, product, curve_prime, tmp); + break; default: pr_err_ratelimited("ecc: unsupported digits size!\n"); return false; @@ -1062,11 +1114,12 @@ EXPORT_SYMBOL(vli_mod_inv); /* ------ Point operations ------ */ /* Returns true if p_point is the point at infinity, false otherwise. */ -static bool ecc_point_is_zero(const struct ecc_point *point) +bool ecc_point_is_zero(const struct ecc_point *point) { return (vli_is_zero(point->x, point->ndigits) && vli_is_zero(point->y, point->ndigits)); } +EXPORT_SYMBOL(ecc_point_is_zero); /* Point multiplication algorithm using Montgomery's ladder with co-Z * coordinates. 
From https://eprint.iacr.org/2011/338.pdf @@ -1291,7 +1344,10 @@ static void ecc_point_mult(struct ecc_point *result, carry = vli_add(sk[0], scalar, curve->n, ndigits); vli_add(sk[1], sk[0], curve->n, ndigits); scalar = sk[!carry]; - num_bits = sizeof(u64) * ndigits * 8 + 1; + if (curve->nbits == 521) /* NIST P521 */ + num_bits = curve->nbits + 2; + else + num_bits = sizeof(u64) * ndigits * 8 + 1; vli_set(rx[1], point->x, ndigits); vli_set(ry[1], point->y, ndigits); @@ -1380,7 +1436,8 @@ void ecc_point_mult_shamir(const struct ecc_point *result, num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits)); i = num_bits - 1; - idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); + idx = !!vli_test_bit(u1, i); + idx |= (!!vli_test_bit(u2, i)) << 1; point = points[idx]; vli_set(rx, point->x, ndigits); @@ -1390,7 +1447,8 @@ void ecc_point_mult_shamir(const struct ecc_point *result, for (--i; i >= 0; i--) { ecc_point_double_jacobian(rx, ry, z, curve); - idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); + idx = !!vli_test_bit(u1, i); + idx |= (!!vli_test_bit(u2, i)) << 1; point = points[idx]; if (point) { u64 tx[ECC_MAX_DIGITS]; @@ -1410,6 +1468,12 @@ void ecc_point_mult_shamir(const struct ecc_point *result, } EXPORT_SYMBOL(ecc_point_mult_shamir); +/* + * This function performs checks equivalent to Appendix A.4.2 of FIPS 186-5. + * Whereas A.4.2 results in an integer in the interval [1, n-1], this function + * ensures that the integer is in the range of [2, n-3]. We are slightly + * stricter because of the currently used scalar multiplication algorithm. + */ static int __ecc_is_key_valid(const struct ecc_curve *curve, const u64 *private_key, unsigned int ndigits) { @@ -1449,31 +1513,29 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, EXPORT_SYMBOL(ecc_is_key_valid); /* - * ECC private keys are generated using the method of extra random bits, - * equivalent to that described in FIPS 186-4, Appendix B.4.1. - * - * d = (c mod(n–1)) + 1 where c is a string of random bits, 64 bits longer - * than requested - * 0 <= c mod(n-1) <= n-2 and implies that - * 1 <= d <= n-1 + * ECC private keys are generated using the method of rejection sampling, + * equivalent to that described in FIPS 186-5, Appendix A.2.2. * * This method generates a private key uniformly distributed in the range - * [1, n-1]. + * [2, n-3]. */ -int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) +int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, + u64 *private_key) { const struct ecc_curve *curve = ecc_get_curve(curve_id); - u64 priv[ECC_MAX_DIGITS]; unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; unsigned int nbits = vli_num_bits(curve->n, ndigits); int err; - /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */ - if (nbits < 160 || ndigits > ARRAY_SIZE(priv)) + /* + * Step 1 & 2: check that N is included in Table 1 of FIPS 186-5, + * section 6.1.1. + */ + if (nbits < 224) return -EINVAL; /* - * FIPS 186-4 recommends that the private key should be obtained from a + * FIPS 186-5 recommends that the private key should be obtained from a * RBG with a security strength equal to or greater than the security * strength associated with N. * @@ -1486,17 +1548,17 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) if (crypto_get_default_rng()) return -EFAULT; - err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); + /* Step 3: obtain N returned_bits from the DRBG. 
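+ *
+ * Note that rejection sampling performs no modular reduction on the
+ * raw random bytes: a candidate outside [2, n-3] simply fails the
+ * check in step 4 below with -EINVAL.  A (hypothetical) caller that
+ * wants guaranteed success would retry, e.g.:
+ *
+ *	do {
+ *		err = ecc_gen_privkey(curve_id, ndigits, key);
+ *	} while (err == -EINVAL);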
*/ + err = crypto_rng_get_bytes(crypto_default_rng, + (u8 *)private_key, nbytes); crypto_put_default_rng(); if (err) return err; - /* Make sure the private key is in the valid range. */ - if (__ecc_is_key_valid(curve, priv, ndigits)) + /* Step 4: make sure the private key is in the valid range. */ + if (__ecc_is_key_valid(curve, private_key, ndigits)) return -EINVAL; - ecc_swap_digits(priv, privkey, ndigits); - return 0; } EXPORT_SYMBOL(ecc_gen_privkey); @@ -1506,23 +1568,20 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, { int ret = 0; struct ecc_point *pk; - u64 priv[ECC_MAX_DIGITS]; const struct ecc_curve *curve = ecc_get_curve(curve_id); - if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) { + if (!private_key) { ret = -EINVAL; goto out; } - ecc_swap_digits(private_key, priv, ndigits); - pk = ecc_alloc_point(ndigits); if (!pk) { ret = -ENOMEM; goto out; } - ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); + ecc_point_mult(pk, &curve->g, private_key, NULL, curve, ndigits); /* SP800-56A rev 3 5.6.2.1.3 key check */ if (ecc_is_pubkey_valid_full(curve, pk)) { @@ -1606,13 +1665,11 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, { int ret = 0; struct ecc_point *product, *pk; - u64 priv[ECC_MAX_DIGITS]; u64 rand_z[ECC_MAX_DIGITS]; unsigned int nbytes; const struct ecc_curve *curve = ecc_get_curve(curve_id); - if (!private_key || !public_key || !curve || - ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) { + if (!private_key || !public_key || ndigits > ARRAY_SIZE(rand_z)) { ret = -EINVAL; goto out; } @@ -1633,15 +1690,13 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, if (ret) goto err_alloc_product; - ecc_swap_digits(private_key, priv, ndigits); - product = ecc_alloc_point(ndigits); if (!product) { ret = -ENOMEM; goto err_alloc_product; } - ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); + ecc_point_mult(product, pk, private_key, rand_z, curve, ndigits); if (ecc_point_is_zero(product)) { ret = -EFAULT; @@ -1651,7 +1706,6 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_swap_digits(product->x, secret, ndigits); err_validity: - memzero_explicit(priv, sizeof(priv)); memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: @@ -1661,4 +1715,5 @@ out: } EXPORT_SYMBOL(crypto_ecdh_shared_secret); +MODULE_DESCRIPTION("core elliptic curve module"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/ecc.h b/crypto/ecc.h deleted file mode 100644 index a006132646a4..000000000000 --- a/crypto/ecc.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (c) 2013, Kenneth MacKay - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef _CRYPTO_ECC_H -#define _CRYPTO_ECC_H - -#include <crypto/ecc_curve.h> - -/* One digit is u64 qword. */ -#define ECC_CURVE_NIST_P192_DIGITS 3 -#define ECC_CURVE_NIST_P256_DIGITS 4 -#define ECC_CURVE_NIST_P384_DIGITS 6 -#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */ - -#define ECC_DIGITS_TO_BYTES_SHIFT 3 - -#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT) - -#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } - -/** - * ecc_swap_digits() - Copy ndigits from big endian array to native array - * @in: Input array - * @out: Output array - * @ndigits: Number of digits to copy - */ -static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits) -{ - const __be64 *src = (__force __be64 *)in; - int i; - - for (i = 0; i < ndigits; i++) - out[i] = be64_to_cpu(src[ndigits - 1 - i]); -} - -/** - * ecc_is_key_valid() - Validate a given ECDH private key - * - * @curve_id: id representing the curve to use - * @ndigits: curve's number of digits - * @private_key: private key to be used for the given curve - * @private_key_len: private key length - * - * Returns 0 if the key is acceptable, a negative value otherwise - */ -int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, - const u64 *private_key, unsigned int private_key_len); - -/** - * ecc_gen_privkey() - Generates an ECC private key. - * The private key is a random integer in the range 0 < random < n, where n is a - * prime that is the order of the cyclic subgroup generated by the distinguished - * point G. - * @curve_id: id representing the curve to use - * @ndigits: curve number of digits - * @private_key: buffer for storing the generated private key - * - * Returns 0 if the private key was generated successfully, a negative value - * if an error occurred. - */ -int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey); - -/** - * ecc_make_pub_key() - Compute an ECC public key - * - * @curve_id: id representing the curve to use - * @ndigits: curve's number of digits - * @private_key: pregenerated private key for the given curve - * @public_key: buffer for storing the generated public key - * - * Returns 0 if the public key was generated successfully, a negative value - * if an error occurred. - */ -int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits, - const u64 *private_key, u64 *public_key); - -/** - * crypto_ecdh_shared_secret() - Compute a shared secret - * - * @curve_id: id representing the curve to use - * @ndigits: curve's number of digits - * @private_key: private key of part A - * @public_key: public key of counterpart B - * @secret: buffer for storing the calculated shared secret - * - * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret - * before using it for symmetric encryption or HMAC. - * - * Returns 0 if the shared secret was generated successfully, a negative value - * if an error occurred. 
- */ -int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, - const u64 *private_key, const u64 *public_key, - u64 *secret); - -/** - * ecc_is_pubkey_valid_partial() - Partial public key validation - * - * @curve: elliptic curve domain parameters - * @pk: public key as a point - * - * Valdiate public key according to SP800-56A section 5.6.2.3.4 ECC Partial - * Public-Key Validation Routine. - * - * Note: There is no check that the public key is in the correct elliptic curve - * subgroup. - * - * Return: 0 if validation is successful, -EINVAL if validation is failed. - */ -int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, - struct ecc_point *pk); - -/** - * ecc_is_pubkey_valid_full() - Full public key validation - * - * @curve: elliptic curve domain parameters - * @pk: public key as a point - * - * Valdiate public key according to SP800-56A section 5.6.2.3.3 ECC Full - * Public-Key Validation Routine. - * - * Return: 0 if validation is successful, -EINVAL if validation is failed. - */ -int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, - struct ecc_point *pk); - -/** - * vli_is_zero() - Determine is vli is zero - * - * @vli: vli to check. - * @ndigits: length of the @vli - */ -bool vli_is_zero(const u64 *vli, unsigned int ndigits); - -/** - * vli_cmp() - compare left and right vlis - * - * @left: vli - * @right: vli - * @ndigits: length of both vlis - * - * Returns sign of @left - @right, i.e. -1 if @left < @right, - * 0 if @left == @right, 1 if @left > @right. - */ -int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits); - -/** - * vli_sub() - Subtracts right from left - * - * @result: where to write result - * @left: vli - * @right vli - * @ndigits: length of all vlis - * - * Note: can modify in-place. - * - * Return: carry bit. - */ -u64 vli_sub(u64 *result, const u64 *left, const u64 *right, - unsigned int ndigits); - -/** - * vli_from_be64() - Load vli from big-endian u64 array - * - * @dest: destination vli - * @src: source array of u64 BE values - * @ndigits: length of both vli and array - */ -void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits); - -/** - * vli_from_le64() - Load vli from little-endian u64 array - * - * @dest: destination vli - * @src: source array of u64 LE values - * @ndigits: length of both vli and array - */ -void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits); - -/** - * vli_mod_inv() - Modular inversion - * - * @result: where to write vli number - * @input: vli value to operate on - * @mod: modulus - * @ndigits: length of all vlis - */ -void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod, - unsigned int ndigits); - -/** - * vli_mod_mult_slow() - Modular multiplication - * - * @result: where to write result value - * @left: vli number to multiply with @right - * @right: vli number to multiply with @left - * @mod: modulus - * @ndigits: length of all vlis - * - * Note: Assumes that mod is big enough curve order. - */ -void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right, - const u64 *mod, unsigned int ndigits); - -/** - * ecc_point_mult_shamir() - Add two points multiplied by scalars - * - * @result: resulting point - * @x: scalar to multiply with @p - * @p: point to multiply with @x - * @y: scalar to multiply with @q - * @q: point to multiply with @y - * @curve: curve - * - * Returns result = x * p + x * q over the curve. - * This works faster than two multiplications and addition. 
- */ -void ecc_point_mult_shamir(const struct ecc_point *result, - const u64 *x, const struct ecc_point *p, - const u64 *y, const struct ecc_point *q, - const struct ecc_curve *curve); -#endif diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h index 9719934c9428..0ecade7d02f5 100644 --- a/crypto/ecc_curve_defs.h +++ b/crypto/ecc_curve_defs.h @@ -17,6 +17,7 @@ static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull, 0x64210519E59C80E7ull }; static struct ecc_curve nist_p192 = { .name = "nist_192", + .nbits = 192, .g = { .x = nist_p192_g_x, .y = nist_p192_g_y, @@ -43,6 +44,7 @@ static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull, 0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull }; static struct ecc_curve nist_p256 = { .name = "nist_256", + .nbits = 256, .g = { .x = nist_p256_g_x, .y = nist_p256_g_y, @@ -75,6 +77,7 @@ static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull, 0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull }; static struct ecc_curve nist_p384 = { .name = "nist_384", + .nbits = 384, .g = { .x = nist_p384_g_x, .y = nist_p384_g_y, @@ -86,6 +89,51 @@ static struct ecc_curve nist_p384 = { .b = nist_p384_b }; +/* NIST P-521 */ +static u64 nist_p521_g_x[] = { 0xf97e7e31c2e5bd66ull, 0x3348b3c1856a429bull, + 0xfe1dc127a2ffa8deull, 0xa14b5e77efe75928ull, + 0xf828af606b4d3dbaull, 0x9c648139053fb521ull, + 0x9e3ecb662395b442ull, 0x858e06b70404e9cdull, + 0xc6ull }; +static u64 nist_p521_g_y[] = { 0x88be94769fd16650ull, 0x353c7086a272c240ull, + 0xc550b9013fad0761ull, 0x97ee72995ef42640ull, + 0x17afbd17273e662cull, 0x98f54449579b4468ull, + 0x5c8a5fb42c7d1bd9ull, 0x39296a789a3bc004ull, + 0x118ull }; +static u64 nist_p521_p[] = { 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0x1ffull }; +static u64 nist_p521_n[] = { 0xbb6fb71e91386409ull, 0x3bb5c9b8899c47aeull, + 0x7fcc0148f709a5d0ull, 0x51868783bf2f966bull, + 0xfffffffffffffffaull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0x1ffull }; +static u64 nist_p521_a[] = { 0xfffffffffffffffcull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0x1ffull }; +static u64 nist_p521_b[] = { 0xef451fd46b503f00ull, 0x3573df883d2c34f1ull, + 0x1652c0bd3bb1bf07ull, 0x56193951ec7e937bull, + 0xb8b489918ef109e1ull, 0xa2da725b99b315f3ull, + 0x929a21a0b68540eeull, 0x953eb9618e1c9a1full, + 0x051ull }; +static struct ecc_curve nist_p521 = { + .name = "nist_521", + .nbits = 521, + .g = { + .x = nist_p521_g_x, + .y = nist_p521_g_y, + .ndigits = 9, + }, + .p = nist_p521_p, + .n = nist_p521_n, + .a = nist_p521_a, + .b = nist_p521_b +}; + /* curve25519 */ static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; @@ -95,6 +143,7 @@ static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; static const struct ecc_curve ecc_25519 = { .name = "curve25519", + .nbits = 255, .g = { .x = curve25519_g_x, .ndigits = 4, diff --git a/crypto/ecdh.c b/crypto/ecdh.c index c6f61c2211dc..9f0b93b3166d 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -6,11 +6,11 @@ */ #include <linux/module.h> +#include <crypto/internal/ecc.h> #include <crypto/internal/kpp.h> #include <crypto/kpp.h> #include <crypto/ecdh.h> #include 
<linux/scatterlist.h> -#include "ecc.h" struct ecdh_ctx { unsigned int curve_id; @@ -28,23 +28,28 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, { struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); struct ecdh params; + int ret = 0; if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0 || params.key_size > sizeof(u64) * ctx->ndigits) return -EINVAL; + memset(ctx->private_key, 0, sizeof(ctx->private_key)); + if (!params.key || !params.key_size) return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); - memcpy(ctx->private_key, params.key, params.key_size); + ecc_digits_from_bytes(params.key, params.key_size, + ctx->private_key, ctx->ndigits); if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, ctx->private_key, params.key_size) < 0) { memzero_explicit(ctx->private_key, params.key_size); - return -EINVAL; + ret = -EINVAL; } - return 0; + + return ret; } static int ecdh_compute_value(struct kpp_request *req) @@ -200,7 +205,7 @@ static struct kpp_alg ecdh_nist_p384 = { static bool ecdh_nist_p192_registered; -static int ecdh_init(void) +static int __init ecdh_init(void) { int ret; @@ -227,7 +232,7 @@ nist_p256_error: return ret; } -static void ecdh_exit(void) +static void __exit ecdh_exit(void) { if (ecdh_nist_p192_registered) crypto_unregister_kpp(&ecdh_nist_p192); @@ -235,7 +240,7 @@ static void ecdh_exit(void) crypto_unregister_kpp(&ecdh_nist_p384); } -subsys_initcall(ecdh_init); +module_init(ecdh_init); module_exit(ecdh_exit); MODULE_ALIAS_CRYPTO("ecdh"); MODULE_LICENSE("GPL"); diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c new file mode 100644 index 000000000000..e0c55c64711c --- /dev/null +++ b/crypto/ecdsa-p1363.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ECDSA P1363 signature encoding + * + * Copyright (c) 2024 Intel Corporation + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <crypto/algapi.h> +#include <crypto/sig.h> +#include <crypto/internal/ecc.h> +#include <crypto/internal/sig.h> + +struct ecdsa_p1363_ctx { + struct crypto_sig *child; +}; + +static int ecdsa_p1363_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); + unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64)); + struct ecdsa_raw_sig sig; + + if (slen != 2 * keylen) + return -EINVAL; + + ecc_digits_from_bytes(src, keylen, sig.r, ndigits); + ecc_digits_from_bytes(src + keylen, keylen, sig.s, ndigits); + + return crypto_sig_verify(ctx->child, &sig, sizeof(sig), digest, dlen); +} + +static unsigned int ecdsa_p1363_key_size(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_keysize(ctx->child); +} + +static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); +} + +static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_digestsize(ctx->child); +} + +static int ecdsa_p1363_set_pub_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_set_pubkey(ctx->child, key, keylen); +} + +static int ecdsa_p1363_init_tfm(struct crypto_sig *tfm) +{ + struct sig_instance *inst = 
sig_alg_instance(tfm); + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + struct crypto_sig *child_tfm; + + child_tfm = crypto_spawn_sig(spawn); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void ecdsa_p1363_exit_tfm(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + crypto_free_sig(ctx->child); +} + +static void ecdsa_p1363_free(struct sig_instance *inst) +{ + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + + crypto_drop_sig(spawn); + kfree(inst); +} + +static int ecdsa_p1363_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_sig_spawn *spawn; + struct sig_instance *inst; + struct sig_alg *ecdsa_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = sig_instance_ctx(inst); + + err = crypto_grab_sig(spawn, sig_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + + ecdsa_alg = crypto_spawn_sig_alg(spawn); + + err = -EINVAL; + if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0) + goto err_free_inst; + + err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name, + &ecdsa_alg->base); + if (err) + goto err_free_inst; + + inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_p1363_ctx); + + inst->alg.init = ecdsa_p1363_init_tfm; + inst->alg.exit = ecdsa_p1363_exit_tfm; + + inst->alg.verify = ecdsa_p1363_verify; + inst->alg.key_size = ecdsa_p1363_key_size; + inst->alg.max_size = ecdsa_p1363_max_size; + inst->alg.digest_size = ecdsa_p1363_digest_size; + inst->alg.set_pub_key = ecdsa_p1363_set_pub_key; + + inst->free = ecdsa_p1363_free; + + err = sig_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ecdsa_p1363_free(inst); + } + return err; +} + +struct crypto_template ecdsa_p1363_tmpl = { + .name = "p1363", + .create = ecdsa_p1363_create, + .module = THIS_MODULE, +}; + +MODULE_ALIAS_CRYPTO("p1363"); diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c new file mode 100644 index 000000000000..ee71594d10a0 --- /dev/null +++ b/crypto/ecdsa-x962.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ECDSA X9.62 signature encoding + * + * Copyright (c) 2021 IBM Corporation + * Copyright (c) 2024 Intel Corporation + */ + +#include <linux/asn1_decoder.h> +#include <linux/err.h> +#include <linux/module.h> +#include <crypto/algapi.h> +#include <crypto/sig.h> +#include <crypto/internal/ecc.h> +#include <crypto/internal/sig.h> + +#include "ecdsasignature.asn1.h" + +struct ecdsa_x962_ctx { + struct crypto_sig *child; +}; + +struct ecdsa_x962_signature_ctx { + struct ecdsa_raw_sig sig; + unsigned int ndigits; +}; + +/* Get the r and s components of a signature from the X.509 certificate. */ +static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen, + unsigned int ndigits) +{ + size_t bufsize = ndigits * sizeof(u64); + const char *d = value; + + if (!value || !vlen || vlen > bufsize + 1) + return -EINVAL; + + /* + * vlen may be 1 byte larger than bufsize due to a leading zero byte + * (necessary if the most significant bit of the integer is set). 
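+ * For example, on NIST P-256 (bufsize == 32) an r or s value with its
+ * top bit set is DER-encoded as 33 bytes: a single 0x00 pad byte
+ * followed by the 32 value bytes.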
+ */ + if (vlen > bufsize) { + /* skip over leading zeros that make 'value' a positive int */ + if (*d == 0) { + vlen -= 1; + d++; + } else { + return -EINVAL; + } + } + + ecc_digits_from_bytes(d, vlen, dest, ndigits); + + return 0; +} + +int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecdsa_x962_signature_ctx *sig_ctx = context; + + return ecdsa_get_signature_rs(sig_ctx->sig.r, hdrlen, tag, value, vlen, + sig_ctx->ndigits); +} + +int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecdsa_x962_signature_ctx *sig_ctx = context; + + return ecdsa_get_signature_rs(sig_ctx->sig.s, hdrlen, tag, value, vlen, + sig_ctx->ndigits); +} + +static int ecdsa_x962_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + struct ecdsa_x962_signature_ctx sig_ctx; + int err; + + sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + sizeof(u64) * BITS_PER_BYTE); + + err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen); + if (err < 0) + return err; + + return crypto_sig_verify(ctx->child, &sig_ctx.sig, sizeof(sig_ctx.sig), + digest, dlen); +} + +static unsigned int ecdsa_x962_key_size(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_keysize(ctx->child); +} + +static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + struct sig_alg *alg = crypto_sig_alg(ctx->child); + int slen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); + + /* + * Verify takes ECDSA-Sig-Value (described in RFC 5480) as input, + * which is actually 2 'key_size'-bit integers encoded in ASN.1. + * Account for the ASN.1 encoding overhead here. + * + * NIST P192/256/384 may prepend a '0' to a coordinate to indicate + * a positive integer. NIST P521 never needs it. + */ + if (strcmp(alg->base.cra_name, "ecdsa-nist-p521") != 0) + slen += 1; + + /* Length of encoding the x & y coordinates */ + slen = 2 * (slen + 2); + + /* + * If coordinate encoding takes at least 128 bytes then an + * additional byte for length encoding is needed. 
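+ *
+ * Worked example for NIST P-256: slen = 32 + 1 = 33 per integer, so
+ * both INTEGERs need 2 * (33 + 2) = 70 bytes; 70 < 128 keeps the
+ * SEQUENCE length in short form and the maximum is 1 + 0 + 1 + 70 =
+ * 72 bytes.  For NIST P-521, 2 * (66 + 2) = 136 crosses the 128-byte
+ * threshold, giving 1 + 1 + 1 + 136 = 139 bytes.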
+ */ + return 1 + (slen >= 128) + 1 + slen; +} + +static unsigned int ecdsa_x962_digest_size(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_digestsize(ctx->child); +} + +static int ecdsa_x962_set_pub_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_set_pubkey(ctx->child, key, keylen); +} + +static int ecdsa_x962_init_tfm(struct crypto_sig *tfm) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + struct crypto_sig *child_tfm; + + child_tfm = crypto_spawn_sig(spawn); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void ecdsa_x962_exit_tfm(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + crypto_free_sig(ctx->child); +} + +static void ecdsa_x962_free(struct sig_instance *inst) +{ + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + + crypto_drop_sig(spawn); + kfree(inst); +} + +static int ecdsa_x962_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_sig_spawn *spawn; + struct sig_instance *inst; + struct sig_alg *ecdsa_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = sig_instance_ctx(inst); + + err = crypto_grab_sig(spawn, sig_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + + ecdsa_alg = crypto_spawn_sig_alg(spawn); + + err = -EINVAL; + if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0) + goto err_free_inst; + + err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name, + &ecdsa_alg->base); + if (err) + goto err_free_inst; + + inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_x962_ctx); + + inst->alg.init = ecdsa_x962_init_tfm; + inst->alg.exit = ecdsa_x962_exit_tfm; + + inst->alg.verify = ecdsa_x962_verify; + inst->alg.key_size = ecdsa_x962_key_size; + inst->alg.max_size = ecdsa_x962_max_size; + inst->alg.digest_size = ecdsa_x962_digest_size; + inst->alg.set_pub_key = ecdsa_x962_set_pub_key; + + inst->free = ecdsa_x962_free; + + err = sig_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ecdsa_x962_free(inst); + } + return err; +} + +struct crypto_template ecdsa_x962_tmpl = { + .name = "x962", + .create = ecdsa_x962_create, + .module = THIS_MODULE, +}; + +MODULE_ALIAS_CRYPTO("x962"); diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c index 1e7b15009bf6..ce8e4364842f 100644 --- a/crypto/ecdsa.c +++ b/crypto/ecdsa.c @@ -4,14 +4,11 @@ */ #include <linux/module.h> -#include <crypto/internal/akcipher.h> -#include <crypto/akcipher.h> +#include <crypto/internal/ecc.h> +#include <crypto/internal/sig.h> #include <crypto/ecdh.h> -#include <linux/asn1_decoder.h> -#include <linux/scatterlist.h> - -#include "ecc.h" -#include "ecdsasignature.asn1.h" +#include <crypto/sha2.h> +#include <crypto/sig.h> struct ecc_ctx { unsigned int curve_id; @@ -23,74 +20,6 @@ struct ecc_ctx { struct ecc_point pub_key; }; -struct ecdsa_signature_ctx { - const struct ecc_curve *curve; - u64 r[ECC_MAX_DIGITS]; - u64 s[ECC_MAX_DIGITS]; -}; - -/* - * Get the r and s components of a signature from the X509 certificate. 
- */ -static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen, unsigned int ndigits) -{ - size_t keylen = ndigits * sizeof(u64); - ssize_t diff = vlen - keylen; - const char *d = value; - u8 rs[ECC_MAX_BYTES]; - - if (!value || !vlen) - return -EINVAL; - - /* diff = 0: 'value' has exacly the right size - * diff > 0: 'value' has too many bytes; one leading zero is allowed that - * makes the value a positive integer; error on more - * diff < 0: 'value' is missing leading zeros, which we add - */ - if (diff > 0) { - /* skip over leading zeros that make 'value' a positive int */ - if (*d == 0) { - vlen -= 1; - diff--; - d++; - } - if (diff) - return -EINVAL; - } - if (-diff >= keylen) - return -EINVAL; - - if (diff) { - /* leading zeros not given in 'value' */ - memset(rs, 0, -diff); - } - - memcpy(&rs[-diff], d, vlen); - - ecc_swap_digits((u64 *)rs, dest, ndigits); - - return 0; -} - -int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct ecdsa_signature_ctx *sig = context; - - return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen, - sig->curve->g.ndigits); -} - -int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct ecdsa_signature_ctx *sig = context; - - return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen, - sig->curve->g.ndigits); -} - static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s) { const struct ecc_curve *curve = ctx->curve; @@ -122,7 +51,7 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con /* res.x = res.x mod n (if res.x > order) */ if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1)) - /* faster alternative for NIST p384, p256 & p192 */ + /* faster alternative for NIST p521, p384, p256 & p192 */ vli_sub(res.x, res.x, curve->n, ndigits); if (!vli_cmp(res.x, r, ndigits)) @@ -134,55 +63,27 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con /* * Verify an ECDSA signature. 
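 *
 * With the conversion to crypto_sig, @src is no longer a BER-encoded
 * blob but a struct ecdsa_raw_sig carrying r and s as digit arrays,
 * and @digest is the raw hash, of which at most the leftmost
 * curve-width bytes are consumed.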
*/ -static int ecdsa_verify(struct akcipher_request *req) +static int ecdsa_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) { - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); - size_t keylen = ctx->curve->g.ndigits * sizeof(u64); - struct ecdsa_signature_ctx sig_ctx = { - .curve = ctx->curve, - }; - u8 rawhash[ECC_MAX_BYTES]; + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); + size_t bufsize = ctx->curve->g.ndigits * sizeof(u64); + const struct ecdsa_raw_sig *sig = src; u64 hash[ECC_MAX_DIGITS]; - unsigned char *buffer; - ssize_t diff; - int ret; if (unlikely(!ctx->pub_key_set)) return -EINVAL; - buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, req->src_len + req->dst_len), - buffer, req->src_len + req->dst_len, 0); - - ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, - buffer, req->src_len); - if (ret < 0) - goto error; - - /* if the hash is shorter then we will add leading zeros to fit to ndigits */ - diff = keylen - req->dst_len; - if (diff >= 0) { - if (diff) - memset(rawhash, 0, diff); - memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len); - } else if (diff < 0) { - /* given hash is longer, we take the left-most bytes */ - memcpy(&rawhash, buffer + req->src_len, keylen); - } - - ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits); + if (slen != sizeof(*sig)) + return -EINVAL; - ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s); + if (bufsize > dlen) + bufsize = dlen; -error: - kfree(buffer); + ecc_digits_from_bytes(digest, bufsize, hash, ctx->curve->g.ndigits); - return ret; + return _ecdsa_verify(ctx, hash, sig->r, sig->s); } static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id) @@ -215,35 +116,39 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx) } /* - * Set the public key given the raw uncompressed key data from an X509 - * certificate. The key data contain the concatenated X and Y coordinates of - * the public key. + * Set the public ECC key as defined by RFC5480 section 2.2 "Subject Public + * Key". Only the uncompressed format is supported. 
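+ * For NIST P-256 the key is therefore 65 bytes long: the 0x04 format
+ * octet followed by the 32-byte X and 32-byte Y coordinates.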
*/ -static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) +static int ecdsa_set_pub_key(struct crypto_sig *tfm, const void *key, + unsigned int keylen) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int digitlen, ndigits; const unsigned char *d = key; - const u64 *digits = (const u64 *)&d[1]; - unsigned int ndigits; int ret; ret = ecdsa_ecc_ctx_reset(ctx); if (ret < 0) return ret; - if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0) + if (keylen < 1 || ((keylen - 1) & 1) != 0) return -EINVAL; /* we only accept uncompressed format indicated by '4' */ if (d[0] != 4) return -EINVAL; keylen--; - ndigits = (keylen >> 1) / sizeof(u64); + digitlen = keylen >> 1; + + ndigits = DIV_ROUND_UP(digitlen, sizeof(u64)); if (ndigits != ctx->curve->g.ndigits) return -EINVAL; - ecc_swap_digits(digits, ctx->pub_key.x, ndigits); - ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits); + d++; + + ecc_digits_from_bytes(d, digitlen, ctx->pub_key.x, ndigits); + ecc_digits_from_bytes(&d[digitlen], digitlen, ctx->pub_key.y, ndigits); + ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); ctx->pub_key_set = ret == 0; @@ -251,31 +156,66 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig return ret; } -static void ecdsa_exit_tfm(struct crypto_akcipher *tfm) +static void ecdsa_exit_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); ecdsa_ecc_ctx_deinit(ctx); } -static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm) +static unsigned int ecdsa_key_size(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); - return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + return ctx->curve->nbits; +} + +static unsigned int ecdsa_digest_size(struct crypto_sig *tfm) +{ + /* + * ECDSA key sizes are much smaller than RSA, and thus could + * operate on (hashed) inputs that are larger than the key size. + * E.g. SHA384-hashed input used with secp256r1 based keys. + * Return the largest supported hash size (SHA512). 
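+ * Standard ECDSA truncates a digest wider than the group order to its
+ * leftmost bits; ecdsa_verify() above implements this by capping
+ * bufsize at the curve width before ecc_digits_from_bytes().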
+ */ + return SHA512_DIGEST_SIZE; } -static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) +static int ecdsa_nist_p521_init_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P521); +} + +static struct sig_alg ecdsa_nist_p521 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, + .init = ecdsa_nist_p521_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p521", + .cra_driver_name = "ecdsa-nist-p521-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; + +static int ecdsa_nist_p384_init_tfm(struct crypto_sig *tfm) +{ + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384); } -static struct akcipher_alg ecdsa_nist_p384 = { +static struct sig_alg ecdsa_nist_p384 = { .verify = ecdsa_verify, .set_pub_key = ecdsa_set_pub_key, - .max_size = ecdsa_max_size, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, .init = ecdsa_nist_p384_init_tfm, .exit = ecdsa_exit_tfm, .base = { @@ -287,17 +227,18 @@ static struct akcipher_alg ecdsa_nist_p384 = { }, }; -static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm) +static int ecdsa_nist_p256_init_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256); } -static struct akcipher_alg ecdsa_nist_p256 = { +static struct sig_alg ecdsa_nist_p256 = { .verify = ecdsa_verify, .set_pub_key = ecdsa_set_pub_key, - .max_size = ecdsa_max_size, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, .init = ecdsa_nist_p256_init_tfm, .exit = ecdsa_exit_tfm, .base = { @@ -309,17 +250,18 @@ static struct akcipher_alg ecdsa_nist_p256 = { }, }; -static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm) +static int ecdsa_nist_p192_init_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192); } -static struct akcipher_alg ecdsa_nist_p192 = { +static struct sig_alg ecdsa_nist_p192 = { .verify = ecdsa_verify, .set_pub_key = ecdsa_set_pub_key, - .max_size = ecdsa_max_size, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, .init = ecdsa_nist_p192_init_tfm, .exit = ecdsa_exit_tfm, .base = { @@ -332,45 +274,74 @@ static struct akcipher_alg ecdsa_nist_p192 = { }; static bool ecdsa_nist_p192_registered; -static int ecdsa_init(void) +static int __init ecdsa_init(void) { int ret; /* NIST p192 may not be available in FIPS mode */ - ret = crypto_register_akcipher(&ecdsa_nist_p192); + ret = crypto_register_sig(&ecdsa_nist_p192); ecdsa_nist_p192_registered = ret == 0; - ret = crypto_register_akcipher(&ecdsa_nist_p256); + ret = crypto_register_sig(&ecdsa_nist_p256); if (ret) goto nist_p256_error; - ret = crypto_register_akcipher(&ecdsa_nist_p384); + ret = crypto_register_sig(&ecdsa_nist_p384); if (ret) goto nist_p384_error; + ret = crypto_register_sig(&ecdsa_nist_p521); + if (ret) + goto nist_p521_error; + + ret = crypto_register_template(&ecdsa_x962_tmpl); + if (ret) + goto x962_tmpl_error; + + ret = crypto_register_template(&ecdsa_p1363_tmpl); + if (ret) + goto p1363_tmpl_error; + return 0; +p1363_tmpl_error: + crypto_unregister_template(&ecdsa_x962_tmpl); + +x962_tmpl_error: + 
crypto_unregister_sig(&ecdsa_nist_p521); + +nist_p521_error: + crypto_unregister_sig(&ecdsa_nist_p384); + nist_p384_error: - crypto_unregister_akcipher(&ecdsa_nist_p256); + crypto_unregister_sig(&ecdsa_nist_p256); nist_p256_error: if (ecdsa_nist_p192_registered) - crypto_unregister_akcipher(&ecdsa_nist_p192); + crypto_unregister_sig(&ecdsa_nist_p192); return ret; } -static void ecdsa_exit(void) +static void __exit ecdsa_exit(void) { + crypto_unregister_template(&ecdsa_x962_tmpl); + crypto_unregister_template(&ecdsa_p1363_tmpl); + if (ecdsa_nist_p192_registered) - crypto_unregister_akcipher(&ecdsa_nist_p192); - crypto_unregister_akcipher(&ecdsa_nist_p256); - crypto_unregister_akcipher(&ecdsa_nist_p384); + crypto_unregister_sig(&ecdsa_nist_p192); + crypto_unregister_sig(&ecdsa_nist_p256); + crypto_unregister_sig(&ecdsa_nist_p384); + crypto_unregister_sig(&ecdsa_nist_p521); } -subsys_initcall(ecdsa_init); +module_init(ecdsa_init); module_exit(ecdsa_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>"); MODULE_DESCRIPTION("ECDSA generic algorithm"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p192"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p256"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p384"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p521"); MODULE_ALIAS_CRYPTO("ecdsa-generic"); diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 69686668625e..e0a2d3209938 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req) u64 seqno; u8 *info; unsigned int ivsize = crypto_aead_ivsize(geniv); - int err; if (req->cryptlen < ivsize) return -EINVAL; @@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req) info = req->iv; - if (req->src != req->dst) { - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - - skcipher_request_set_sync_tfm(nreq, ctx->sknull); - skcipher_request_set_callback(nreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, - req->assoclen + req->cryptlen, - NULL); - - err = crypto_skcipher_encrypt(nreq); - if (err) - return err; - } + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, + req->assoclen + req->cryptlen); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); @@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void) crypto_unregister_template(&echainiv_tmpl); } -subsys_initcall(echainiv_module_init); +module_init(echainiv_module_init); module_exit(echainiv_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c index 6a3fd09057d0..2c0602f0cd40 100644 --- a/crypto/ecrdsa.c +++ b/crypto/ecrdsa.c @@ -18,14 +18,13 @@ #include <linux/module.h> #include <linux/crypto.h> +#include <crypto/sig.h> #include <crypto/streebog.h> -#include <crypto/internal/akcipher.h> -#include <crypto/akcipher.h> +#include <crypto/internal/ecc.h> +#include <crypto/internal/sig.h> #include <linux/oid_registry.h> -#include <linux/scatterlist.h> #include "ecrdsa_params.asn1.h" #include "ecrdsa_pub_key.asn1.h" -#include "ecc.h" #include "ecrdsa_defs.h" #define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8) @@ -68,13 +67,12 @@ static const struct ecc_curve *get_curve_by_oid(enum OID oid) } } -static int ecrdsa_verify(struct akcipher_request *req) +static int ecrdsa_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) { - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm); - unsigned char 
sig[ECRDSA_MAX_SIG_SIZE]; - unsigned char digest[STREEBOG512_DIGEST_SIZE]; - unsigned int ndigits = req->dst_len / sizeof(u64); + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int ndigits = dlen / sizeof(u64); u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */ u64 _r[ECRDSA_MAX_DIGITS]; /* -r */ u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */ @@ -91,37 +89,31 @@ static int ecrdsa_verify(struct akcipher_request *req) */ if (!ctx->curve || !ctx->digest || - !req->src || + !src || + !digest || !ctx->pub_key.x || - req->dst_len != ctx->digest_len || - req->dst_len != ctx->curve->g.ndigits * sizeof(u64) || + dlen != ctx->digest_len || + dlen != ctx->curve->g.ndigits * sizeof(u64) || ctx->pub_key.ndigits != ctx->curve->g.ndigits || - req->dst_len * 2 != req->src_len || - WARN_ON(req->src_len > sizeof(sig)) || - WARN_ON(req->dst_len > sizeof(digest))) + dlen * 2 != slen || + WARN_ON(slen > ECRDSA_MAX_SIG_SIZE) || + WARN_ON(dlen > STREEBOG512_DIGEST_SIZE)) return -EBADMSG; - sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len), - sig, req->src_len); - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, - req->src_len + req->dst_len), - digest, req->dst_len, req->src_len); - - vli_from_be64(s, sig, ndigits); - vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits); + vli_from_be64(s, src, ndigits); + vli_from_be64(r, src + ndigits * sizeof(u64), ndigits); /* Step 1: verify that 0 < r < q, 0 < s < q */ if (vli_is_zero(r, ndigits) || - vli_cmp(r, ctx->curve->n, ndigits) == 1 || + vli_cmp(r, ctx->curve->n, ndigits) >= 0 || vli_is_zero(s, ndigits) || - vli_cmp(s, ctx->curve->n, ndigits) == 1) + vli_cmp(s, ctx->curve->n, ndigits) >= 0) return -EKEYREJECTED; /* Step 2: calculate hash (h) of the message (passed as input) */ /* Step 3: calculate e = h \mod q */ vli_from_le64(e, digest, ndigits); - if (vli_cmp(e, ctx->curve->n, ndigits) == 1) + if (vli_cmp(e, ctx->curve->n, ndigits) >= 0) vli_sub(e, e, ctx->curve->n, ndigits); if (vli_is_zero(e, ndigits)) e[0] = 1; @@ -137,7 +129,7 @@ static int ecrdsa_verify(struct akcipher_request *req) /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key, ctx->curve); - if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1) + if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0) vli_sub(cc.x, cc.x, ctx->curve->n, ndigits); /* Step 7: if R == r signature is valid */ @@ -188,10 +180,10 @@ static u8 *ecrdsa_unpack_u32(u32 *dst, void *src) } /* Parse BER encoded subjectPublicKey. */ -static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, +static int ecrdsa_set_pub_key(struct crypto_sig *tfm, const void *key, unsigned int keylen) { - struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); unsigned int ndigits; u32 algo, paramlen; u8 *params; @@ -249,24 +241,32 @@ static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, return 0; } -static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm) +static unsigned int ecrdsa_key_size(struct crypto_sig *tfm) { - struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); /* * Verify doesn't need any output, so it's just informational * for keyctl to determine the key bit size. 
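 * With the conversion to crypto_sig this is reported in bits rather
 * than bytes, hence the BITS_PER_BYTE factor below.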
*/ - return ctx->pub_key.ndigits * sizeof(u64); + return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE; +} + +static unsigned int ecrdsa_max_size(struct crypto_sig *tfm) +{ + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); + + return 2 * ctx->pub_key.ndigits * sizeof(u64); } -static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm) +static void ecrdsa_exit_tfm(struct crypto_sig *tfm) { } -static struct akcipher_alg ecrdsa_alg = { +static struct sig_alg ecrdsa_alg = { .verify = ecrdsa_verify, .set_pub_key = ecrdsa_set_pub_key, + .key_size = ecrdsa_key_size, .max_size = ecrdsa_max_size, .exit = ecrdsa_exit_tfm, .base = { @@ -280,12 +280,12 @@ static struct akcipher_alg ecrdsa_alg = { static int __init ecrdsa_mod_init(void) { - return crypto_register_akcipher(&ecrdsa_alg); + return crypto_register_sig(&ecrdsa_alg); } static void __exit ecrdsa_mod_fini(void) { - crypto_unregister_akcipher(&ecrdsa_alg); + crypto_unregister_sig(&ecrdsa_alg); } module_init(ecrdsa_mod_init); @@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>"); MODULE_DESCRIPTION("EC-RDSA generic algorithm"); +MODULE_ALIAS_CRYPTO("ecrdsa"); MODULE_ALIAS_CRYPTO("ecrdsa-generic"); diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h index 170baf039007..1c2c2449e331 100644 --- a/crypto/ecrdsa_defs.h +++ b/crypto/ecrdsa_defs.h @@ -13,7 +13,7 @@ #ifndef _CRYTO_ECRDSA_DEFS_H #define _CRYTO_ECRDSA_DEFS_H -#include "ecc.h" +#include <crypto/internal/ecc.h> #define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8) #define ECRDSA_MAX_DIGITS (512 / 64) @@ -47,6 +47,7 @@ static u64 cp256a_b[] = { static struct ecc_curve gost_cp256a = { .name = "cp256a", + .nbits = 256, .g = { .x = cp256a_g_x, .y = cp256a_g_y, @@ -80,6 +81,7 @@ static u64 cp256b_b[] = { static struct ecc_curve gost_cp256b = { .name = "cp256b", + .nbits = 256, .g = { .x = cp256b_g_x, .y = cp256b_g_y, @@ -117,6 +119,7 @@ static u64 cp256c_b[] = { static struct ecc_curve gost_cp256c = { .name = "cp256c", + .nbits = 256, .g = { .x = cp256c_g_x, .y = cp256c_g_y, @@ -166,6 +169,7 @@ static u64 tc512a_b[] = { static struct ecc_curve gost_tc512a = { .name = "tc512a", + .nbits = 512, .g = { .x = tc512a_g_x, .y = tc512a_g_y, @@ -211,6 +215,7 @@ static u64 tc512b_b[] = { static struct ecc_curve gost_tc512b = { .name = "tc512b", + .nbits = 512, .g = { .x = tc512b_g_x, .y = tc512b_g_y, diff --git a/crypto/essiv.c b/crypto/essiv.c index 8bcc5bdcb2a9..a47a3eab6935 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -131,9 +131,9 @@ static int essiv_aead_setauthsize(struct crypto_aead *tfm, return crypto_aead_setauthsize(tctx->u.aead, authsize); } -static void essiv_skcipher_done(struct crypto_async_request *areq, int err) +static void essiv_skcipher_done(void *data, int err) { - struct skcipher_request *req = areq->data; + struct skcipher_request *req = data; skcipher_request_complete(req, err); } @@ -166,12 +166,17 @@ static int essiv_skcipher_decrypt(struct skcipher_request *req) return essiv_skcipher_crypt(req, false); } -static void essiv_aead_done(struct crypto_async_request *areq, int err) +static void essiv_aead_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; struct essiv_aead_request_ctx *rctx = aead_request_ctx(req); + if (err == -EINPROGRESS) + goto out; + kfree(rctx->assoc); + +out: aead_request_complete(req, err); } @@ -181,9 +186,14 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm); struct 
essiv_aead_request_ctx *rctx = aead_request_ctx(req); struct aead_request *subreq = &rctx->aead_req; + int ivsize = crypto_aead_ivsize(tfm); + int ssize = req->assoclen - ivsize; struct scatterlist *src = req->src; int err; + if (ssize < 0) + return -EINVAL; + crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv); /* @@ -193,19 +203,12 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) */ rctx->assoc = NULL; if (req->src == req->dst || !enc) { - scatterwalk_map_and_copy(req->iv, req->dst, - req->assoclen - crypto_aead_ivsize(tfm), - crypto_aead_ivsize(tfm), 1); + scatterwalk_map_and_copy(req->iv, req->dst, ssize, ivsize, 1); } else { u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset; - int ivsize = crypto_aead_ivsize(tfm); - int ssize = req->assoclen - ivsize; struct scatterlist *sg; int nents; - if (ssize < 0) - return -EINVAL; - nents = sg_nents_for_len(req->src, ssize); if (nents < 0) return -EINVAL; @@ -247,7 +250,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) err = enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); - if (rctx->assoc && err != -EINPROGRESS) + if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY) kfree(rctx->assoc); return err; } @@ -400,8 +403,7 @@ static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name) if (len >= CRYPTO_MAX_ALG_NAME) return false; - memcpy(essiv_cipher_name, p, len); - essiv_cipher_name[len] = '\0'; + strscpy(essiv_cipher_name, p, len + 1); return true; } @@ -437,6 +439,7 @@ out: static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) { + struct skcipher_alg_common *skcipher_alg = NULL; struct crypto_attr_type *algt; const char *inner_cipher_name; const char *shash_name; @@ -445,7 +448,6 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_instance *inst; struct crypto_alg *base, *block_base; struct essiv_instance_ctx *ictx; - struct skcipher_alg *skcipher_alg = NULL; struct aead_alg *aead_alg = NULL; struct crypto_alg *_hash_alg; struct shash_alg *hash_alg; @@ -470,7 +472,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) mask = crypto_algt_inherited_mask(algt); switch (type) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: skcipher_inst = kzalloc(sizeof(*skcipher_inst) + sizeof(*ictx), GFP_KERNEL); if (!skcipher_inst) @@ -484,9 +486,10 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) inner_cipher_name, 0, mask); if (err) goto out_free_inst; - skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn); + skcipher_alg = crypto_spawn_skcipher_alg_common( + &ictx->u.skcipher_spawn); block_base = &skcipher_alg->base; - ivsize = crypto_skcipher_alg_ivsize(skcipher_alg); + ivsize = skcipher_alg->ivsize; break; case CRYPTO_ALG_TYPE_AEAD: @@ -543,8 +546,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) } /* record the driver name so we can instantiate this exact algo later */ - strlcpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, - CRYPTO_MAX_ALG_NAME); + strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name); /* Instance fields */ @@ -569,18 +571,17 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) base->cra_alignmask = block_base->cra_alignmask; base->cra_priority = block_base->cra_priority; - if (type == CRYPTO_ALG_TYPE_SKCIPHER) { + if (type == CRYPTO_ALG_TYPE_LSKCIPHER) { skcipher_inst->alg.setkey = essiv_skcipher_setkey; skcipher_inst->alg.encrypt = 
essiv_skcipher_encrypt; skcipher_inst->alg.decrypt = essiv_skcipher_decrypt; skcipher_inst->alg.init = essiv_skcipher_init_tfm; skcipher_inst->alg.exit = essiv_skcipher_exit_tfm; - skcipher_inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(skcipher_alg); - skcipher_inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(skcipher_alg); + skcipher_inst->alg.min_keysize = skcipher_alg->min_keysize; + skcipher_inst->alg.max_keysize = skcipher_alg->max_keysize; skcipher_inst->alg.ivsize = ivsize; - skcipher_inst->alg.chunksize = crypto_skcipher_alg_chunksize(skcipher_alg); - skcipher_inst->alg.walksize = crypto_skcipher_alg_walksize(skcipher_alg); + skcipher_inst->alg.chunksize = skcipher_alg->chunksize; skcipher_inst->free = essiv_skcipher_free_instance; @@ -611,7 +612,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) out_free_hash: crypto_mod_put(_hash_alg); out_drop_skcipher: - if (type == CRYPTO_ALG_TYPE_SKCIPHER) + if (type == CRYPTO_ALG_TYPE_LSKCIPHER) crypto_drop_skcipher(&ictx->u.skcipher_spawn); else crypto_drop_aead(&ictx->u.aead_spawn); @@ -638,10 +639,10 @@ static void __exit essiv_module_exit(void) crypto_unregister_template(&essiv_tmpl); } -subsys_initcall(essiv_module_init); +module_init(essiv_module_init); module_exit(essiv_module_exit); MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("essiv"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 76a04d000c0d..80036835cec5 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -43,10 +43,10 @@ */ #include <asm/byteorder.h> +#include <crypto/algapi.h> #include <linux/bitops.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/crypto.h> #define ROUNDS 16 @@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void) crypto_unregister_alg(&fcrypt_alg); } -subsys_initcall(fcrypt_mod_init); +module_init(fcrypt_mod_init); module_exit(fcrypt_mod_fini); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/fips.c b/crypto/fips.c index 7b1d8caee669..65d2bc070a26 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -12,6 +12,8 @@ #include <linux/kernel.h> #include <linux/sysctl.h> #include <linux/notifier.h> +#include <linux/string_choices.h> +#include <generated/utsrelease.h> int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); @@ -22,39 +24,55 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain); /* Process kernel command-line parameter at boot time. fips=0 or fips=1 */ static int fips_enable(char *str) { - fips_enabled = !!simple_strtol(str, NULL, 0); - printk(KERN_INFO "fips mode: %s\n", - fips_enabled ? 
"enabled" : "disabled"); + if (kstrtoint(str, 0, &fips_enabled)) + return 0; + + fips_enabled = !!fips_enabled; + pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled)); return 1; } __setup("fips=", fips_enable); -static struct ctl_table crypto_sysctl_table[] = { +#define FIPS_MODULE_NAME CONFIG_CRYPTO_FIPS_NAME +#ifdef CONFIG_CRYPTO_FIPS_CUSTOM_VERSION +#define FIPS_MODULE_VERSION CONFIG_CRYPTO_FIPS_VERSION +#else +#define FIPS_MODULE_VERSION UTS_RELEASE +#endif + +static char fips_name[] = FIPS_MODULE_NAME; +static char fips_version[] = FIPS_MODULE_VERSION; + +static const struct ctl_table crypto_sysctl_table[] = { { - .procname = "fips_enabled", - .data = &fips_enabled, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec + .procname = "fips_enabled", + .data = &fips_enabled, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec + }, + { + .procname = "fips_name", + .data = &fips_name, + .maxlen = 64, + .mode = 0444, + .proc_handler = proc_dostring }, - {} -}; - -static struct ctl_table crypto_dir_table[] = { { - .procname = "crypto", - .mode = 0555, - .child = crypto_sysctl_table + .procname = "fips_version", + .data = &fips_version, + .maxlen = 64, + .mode = 0444, + .proc_handler = proc_dostring }, - {} }; static struct ctl_table_header *crypto_sysctls; static void crypto_proc_fips_init(void) { - crypto_sysctls = register_sysctl_table(crypto_dir_table); + crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table); } static void crypto_proc_fips_exit(void) @@ -80,5 +98,5 @@ static void __exit fips_exit(void) crypto_proc_fips_exit(); } -subsys_initcall(fips_init); +module_init(fips_init); module_exit(fips_exit); diff --git a/crypto/gcm.c b/crypto/gcm.c index 338ee0769747..97716482bed0 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -9,7 +9,6 @@ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <crypto/gcm.h> #include <crypto/hash.h> @@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx { struct crypto_rfc4543_ctx { struct crypto_aead *child; - struct crypto_sync_skcipher *null; u8 nonce[4]; }; @@ -79,8 +77,6 @@ static struct { struct scatterlist sg; } *gcm_zeroes; -static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc); - static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( struct aead_request *req) { @@ -197,7 +193,7 @@ static inline unsigned int gcm_remain(unsigned int len) return len ? 
16 - len : 0; } -static void gcm_hash_len_done(struct crypto_async_request *areq, int err); +static void gcm_hash_len_done(void *data, int err); static int gcm_hash_update(struct aead_request *req, crypto_completion_t compl, @@ -246,9 +242,9 @@ static int gcm_hash_len_continue(struct aead_request *req, u32 flags) return gctx->complete(req, flags); } -static void gcm_hash_len_done(struct crypto_async_request *areq, int err) +static void gcm_hash_len_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -267,10 +263,9 @@ static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags) gcm_hash_len_continue(req, flags); } -static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_crypt_remain_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -298,9 +293,9 @@ static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags) return gcm_hash_crypt_remain_continue(req, flags); } -static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) +static void gcm_hash_crypt_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -326,10 +321,9 @@ static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags) return gcm_hash_crypt_remain_continue(req, flags); } -static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_assoc_remain_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -355,9 +349,9 @@ static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags) return gcm_hash_assoc_remain_continue(req, flags); } -static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) +static void gcm_hash_assoc_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -380,9 +374,9 @@ static int gcm_hash_init_continue(struct aead_request *req, u32 flags) return gcm_hash_assoc_remain_continue(req, flags); } -static void gcm_hash_init_done(struct crypto_async_request *areq, int err) +static void gcm_hash_init_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -433,9 +427,9 @@ static int gcm_encrypt_continue(struct aead_request *req, u32 flags) return gcm_hash(req, flags); } -static void gcm_encrypt_done(struct crypto_async_request *areq, int err) +static void gcm_encrypt_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (err) goto out; @@ -477,9 +471,9 @@ static int crypto_gcm_verify(struct aead_request *req) return crypto_memneq(iauth_tag, auth_tag, authsize) ? 
-EBADMSG : 0; } -static void gcm_decrypt_done(struct crypto_async_request *areq, int err) +static void gcm_decrypt_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; if (!err) err = crypto_gcm_verify(req); @@ -578,10 +572,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { + struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; - struct skcipher_alg *ctr; struct hash_alg_common *ghash; int err; @@ -609,13 +603,12 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr_name, 0, mask); if (err) goto err_free_inst; - ctr = crypto_spawn_skcipher_alg(&ctx->ctr); + ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || - crypto_skcipher_alg_ivsize(ctr) != 16 || - ctr->base.cra_blocksize != 1) + ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; @@ -632,11 +625,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | - ctr->base.cra_alignmask; + inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.init = crypto_gcm_init_tfm; inst->alg.exit = crypto_gcm_exit_tfm; @@ -934,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) unsigned int authsize = crypto_aead_authsize(aead); u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), crypto_aead_alignmask(ctx->child) + 1); - int err; if (req->src != req->dst) { - err = crypto_rfc4543_copy_src_to_dst(req, enc); - if (err) - return err; + unsigned int nbytes = req->assoclen + req->cryptlen - + (enc ? 0 : authsize); + + memcpy_sglist(req->dst, req->src, nbytes); } memcpy(iv, ctx->nonce, 4); @@ -956,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); } -static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); - unsigned int authsize = crypto_aead_authsize(aead); - unsigned int nbytes = req->assoclen + req->cryptlen - - (enc ? 
0 : authsize); - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); - - skcipher_request_set_sync_tfm(nreq, ctx->null); - skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); - - return crypto_skcipher_encrypt(nreq); -} - static int crypto_rfc4543_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: @@ -991,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) struct crypto_aead_spawn *spawn = &ictx->aead; struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; - struct crypto_sync_skcipher *null; unsigned long align; - int err = 0; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_aead; - ctx->child = aead; - ctx->null = null; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); @@ -1016,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) align + GCM_AES_IV_SIZE); return 0; - -err_free_aead: - crypto_free_aead(aead); - return err; } static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) @@ -1027,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); } static void crypto_rfc4543_free(struct aead_instance *inst) @@ -1156,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void) ARRAY_SIZE(crypto_gcm_tmpls)); } -subsys_initcall(crypto_gcm_module_init); +module_init(crypto_gcm_module_init); module_exit(crypto_gcm_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/geniv.c b/crypto/geniv.c index bee4621b4f12..42eff6a7387c 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -9,7 +9,6 @@ #include <crypto/internal/geniv.h> #include <crypto/internal/rng.h> -#include <crypto/null.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> @@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead) if (err) goto out; - ctx->sknull = crypto_get_default_null_skcipher(); - err = PTR_ERR(ctx->sknull); - if (IS_ERR(ctx->sknull)) - goto out; - child = crypto_spawn_aead(aead_instance_ctx(inst)); err = PTR_ERR(child); if (IS_ERR(child)) - goto drop_null; + goto out; ctx->child = child; crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) + @@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead) out: return err; - -drop_null: - crypto_put_default_null_skcipher(); - goto out; } EXPORT_SYMBOL_GPL(aead_init_geniv); @@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm) struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c deleted file mode 100644 index a69ae3e6c16c..000000000000 --- a/crypto/gf128mul.c +++ /dev/null @@ -1,416 +0,0 @@ -/* gf128mul.c - GF(2^128) multiplication functions - * - * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. - * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org> - * - * Based on Dr Brian Gladman's (GPL'd) work published at - * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php - * See the original copyright notice below. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - */ - -/* - --------------------------------------------------------------------------- - Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. - - LICENSE TERMS - - The free distribution and use of this software in both source and binary - form is allowed (with or without changes) provided that: - - 1. distributions of this source code include the above copyright - notice, this list of conditions and the following disclaimer; - - 2. distributions in binary form include the above copyright - notice, this list of conditions and the following disclaimer - in the documentation and/or other associated materials; - - 3. the copyright holder's name is not used to endorse products - built using this software without specific written permission. - - ALTERNATIVELY, provided that this notice is retained in full, this product - may be distributed under the terms of the GNU General Public License (GPL), - in which case the provisions of the GPL apply INSTEAD OF those given above. - - DISCLAIMER - - This software is provided 'as is' with no explicit or implied warranties - in respect of its properties, including, but not limited to, correctness - and/or fitness for purpose. - --------------------------------------------------------------------------- - Issue 31/01/2006 - - This file provides fast multiplication in GF(2^128) as required by several - cryptographic authentication modes -*/ - -#include <crypto/gf128mul.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> - -#define gf128mul_dat(q) { \ - q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\ - q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\ - q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\ - q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\ - q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\ - q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\ - q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\ - q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\ - q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\ - q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\ - q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\ - q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\ - q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\ - q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\ - q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\ - q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\ - q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\ - q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\ - q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\ - q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\ - q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\ - q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\ - q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\ - 
q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\ - q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\ - q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\ - q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\ - q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\ - q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\ - q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\ - q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\ - q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \ -} - -/* - * Given a value i in 0..255 as the byte overflow when a field element - * in GF(2^128) is multiplied by x^8, the following macro returns the - * 16-bit value that must be XOR-ed into the low-degree end of the - * product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1. - * - * There are two versions of the macro, and hence two tables: one for - * the "be" convention where the highest-order bit is the coefficient of - * the highest-degree polynomial term, and one for the "le" convention - * where the highest-order bit is the coefficient of the lowest-degree - * polynomial term. In both cases the values are stored in CPU byte - * endianness such that the coefficients are ordered consistently across - * bytes, i.e. in the "be" table bits 15..0 of the stored value - * correspond to the coefficients of x^15..x^0, and in the "le" table - * bits 15..0 correspond to the coefficients of x^0..x^15. - * - * Therefore, provided that the appropriate byte endianness conversions - * are done by the multiplication functions (and these must be in place - * anyway to support both little endian and big endian CPUs), the "be" - * table can be used for multiplications of both "bbe" and "ble" - * elements, and the "le" table can be used for multiplications of both - * "lle" and "lbe" elements. - */ - -#define xda_be(i) ( \ - (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \ - (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \ - (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \ - (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \ -) - -#define xda_le(i) ( \ - (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \ - (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \ - (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \ - (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \ -) - -static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le); -static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be); - -/* - * The following functions multiply a field element by x^8 in - * the polynomial field representation. They use 64-bit word operations - * to gain speed but compensate for machine endianness and hence work - * correctly on both styles of machine. 
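As a concrete illustration of how these reduction tables are generated, the xda_le macro above can be evaluated on its own. A minimal standalone sketch (plain userspace C, not part of the patch; it simply re-expresses the macro as a function) reproducing two table entries:

        #include <stdint.h>
        #include <stdio.h>

        /* Same bit pattern as the xda_le macro above: the 16-bit mask that
         * gets XOR-ed into the low-degree end of the product when the byte
         * overflowing past x^128 is i. */
        static uint16_t xda_le(uint8_t i)
        {
                return (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^
                       (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^
                       (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^
                       (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0);
        }

        int main(void)
        {
                /* Prints the gf128mul_table_le entries for overflow bytes
                 * 0x01 and 0x80: 0x01c2 and 0xe100. */
                printf("0x%04x 0x%04x\n", xda_le(0x01), xda_le(0x80));
                return 0;
        }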
- */ - -static void gf128mul_x8_lle(be128 *x) -{ - u64 a = be64_to_cpu(x->a); - u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_le[b & 0xff]; - - x->b = cpu_to_be64((b >> 8) | (a << 56)); - x->a = cpu_to_be64((a >> 8) ^ (_tt << 48)); -} - -static void gf128mul_x8_bbe(be128 *x) -{ - u64 a = be64_to_cpu(x->a); - u64 b = be64_to_cpu(x->b); - u64 _tt = gf128mul_table_be[a >> 56]; - - x->a = cpu_to_be64((a << 8) | (b >> 56)); - x->b = cpu_to_be64((b << 8) ^ _tt); -} - -void gf128mul_x8_ble(le128 *r, const le128 *x) -{ - u64 a = le64_to_cpu(x->a); - u64 b = le64_to_cpu(x->b); - u64 _tt = gf128mul_table_be[a >> 56]; - - r->a = cpu_to_le64((a << 8) | (b >> 56)); - r->b = cpu_to_le64((b << 8) ^ _tt); -} -EXPORT_SYMBOL(gf128mul_x8_ble); - -void gf128mul_lle(be128 *r, const be128 *b) -{ - be128 p[8]; - int i; - - p[0] = *r; - for (i = 0; i < 7; ++i) - gf128mul_x_lle(&p[i + 1], &p[i]); - - memset(r, 0, sizeof(*r)); - for (i = 0;;) { - u8 ch = ((u8 *)b)[15 - i]; - - if (ch & 0x80) - be128_xor(r, r, &p[0]); - if (ch & 0x40) - be128_xor(r, r, &p[1]); - if (ch & 0x20) - be128_xor(r, r, &p[2]); - if (ch & 0x10) - be128_xor(r, r, &p[3]); - if (ch & 0x08) - be128_xor(r, r, &p[4]); - if (ch & 0x04) - be128_xor(r, r, &p[5]); - if (ch & 0x02) - be128_xor(r, r, &p[6]); - if (ch & 0x01) - be128_xor(r, r, &p[7]); - - if (++i >= 16) - break; - - gf128mul_x8_lle(r); - } -} -EXPORT_SYMBOL(gf128mul_lle); - -void gf128mul_bbe(be128 *r, const be128 *b) -{ - be128 p[8]; - int i; - - p[0] = *r; - for (i = 0; i < 7; ++i) - gf128mul_x_bbe(&p[i + 1], &p[i]); - - memset(r, 0, sizeof(*r)); - for (i = 0;;) { - u8 ch = ((u8 *)b)[i]; - - if (ch & 0x80) - be128_xor(r, r, &p[7]); - if (ch & 0x40) - be128_xor(r, r, &p[6]); - if (ch & 0x20) - be128_xor(r, r, &p[5]); - if (ch & 0x10) - be128_xor(r, r, &p[4]); - if (ch & 0x08) - be128_xor(r, r, &p[3]); - if (ch & 0x04) - be128_xor(r, r, &p[2]); - if (ch & 0x02) - be128_xor(r, r, &p[1]); - if (ch & 0x01) - be128_xor(r, r, &p[0]); - - if (++i >= 16) - break; - - gf128mul_x8_bbe(r); - } -} -EXPORT_SYMBOL(gf128mul_bbe); - -/* This version uses 64k bytes of table space. - A 16 byte buffer has to be multiplied by a 16 byte key - value in GF(2^128). If we consider a GF(2^128) value in - the buffer's lowest byte, we can construct a table of - the 256 16 byte values that result from the 256 values - of this byte. This requires 4096 bytes. But we also - need tables for each of the 16 higher bytes in the - buffer as well, which makes 64 kbytes in total. -*/ -/* additional explanation - * t[0][BYTE] contains g*BYTE - * t[1][BYTE] contains g*x^8*BYTE - * .. 
- * t[15][BYTE] contains g*x^120*BYTE */ -struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g) -{ - struct gf128mul_64k *t; - int i, j, k; - - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (!t) - goto out; - - for (i = 0; i < 16; i++) { - t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL); - if (!t->t[i]) { - gf128mul_free_64k(t); - t = NULL; - goto out; - } - } - - t->t[0]->t[1] = *g; - for (j = 1; j <= 64; j <<= 1) - gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]); - - for (i = 0;;) { - for (j = 2; j < 256; j += j) - for (k = 1; k < j; ++k) - be128_xor(&t->t[i]->t[j + k], - &t->t[i]->t[j], &t->t[i]->t[k]); - - if (++i >= 16) - break; - - for (j = 128; j > 0; j >>= 1) { - t->t[i]->t[j] = t->t[i - 1]->t[j]; - gf128mul_x8_bbe(&t->t[i]->t[j]); - } - } - -out: - return t; -} -EXPORT_SYMBOL(gf128mul_init_64k_bbe); - -void gf128mul_free_64k(struct gf128mul_64k *t) -{ - int i; - - for (i = 0; i < 16; i++) - kfree_sensitive(t->t[i]); - kfree_sensitive(t); -} -EXPORT_SYMBOL(gf128mul_free_64k); - -void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t) -{ - u8 *ap = (u8 *)a; - be128 r[1]; - int i; - - *r = t->t[0]->t[ap[15]]; - for (i = 1; i < 16; ++i) - be128_xor(r, r, &t->t[i]->t[ap[15 - i]]); - *a = *r; -} -EXPORT_SYMBOL(gf128mul_64k_bbe); - -/* This version uses 4k bytes of table space. - A 16 byte buffer has to be multiplied by a 16 byte key - value in GF(2^128). If we consider a GF(2^128) value in a - single byte, we can construct a table of the 256 16 byte - values that result from the 256 values of this byte. - This requires 4096 bytes. If we take the highest byte in - the buffer and use this table to get the result, we then - have to multiply by x^120 to get the final value. For the - next highest byte the result has to be multiplied by x^112 - and so on. But we can do this by accumulating the result - in an accumulator starting with the result for the top - byte. We repeatedly multiply the accumulator value by - x^8 and then add in (i.e. xor) the 16 bytes of the next - lower byte in the buffer, stopping when we reach the - lowest byte. This requires a 4096 byte table. 
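The fill loops in the init functions rely on linearity: once the power-of-two indices hold g*x^i, every other multiple of g falls out as an XOR of those entries. A toy standalone sketch of the same pattern in GF(2^8) with the AES polynomial 0x11b (nothing kernel-specific; 0x57 * 0x13 = 0xfe is the FIPS-197 worked example):

        #include <stdint.h>
        #include <stdio.h>

        static uint8_t xtime(uint8_t a) /* multiply by x mod x^8+x^4+x^3+x+1 */
        {
                return (a << 1) ^ ((a & 0x80) ? 0x1b : 0);
        }

        int main(void)
        {
                uint8_t t[256], g = 0x57;
                int j, k;

                t[0] = 0;
                t[1] = g;
                for (j = 1; j <= 64; j <<= 1)   /* seed t[2j] = t[j] * x */
                        t[j + j] = xtime(t[j]);
                for (j = 2; j < 256; j += j)    /* derive the rest by XOR */
                        for (k = 1; k < j; ++k)
                                t[j + k] = t[j] ^ t[k];

                printf("0x57 * 0x13 = 0x%02x\n", t[0x13]);      /* 0xfe */
                return 0;
        }

gf128mul_init_4k_bbe below follows exactly this shape, just with 16-byte be128 elements and gf128mul_x_bbe in place of xtime.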
-*/ -struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g) -{ - struct gf128mul_4k *t; - int j, k; - - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (!t) - goto out; - - t->t[128] = *g; - for (j = 64; j > 0; j >>= 1) - gf128mul_x_lle(&t->t[j], &t->t[j+j]); - - for (j = 2; j < 256; j += j) - for (k = 1; k < j; ++k) - be128_xor(&t->t[j + k], &t->t[j], &t->t[k]); - -out: - return t; -} -EXPORT_SYMBOL(gf128mul_init_4k_lle); - -struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g) -{ - struct gf128mul_4k *t; - int j, k; - - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (!t) - goto out; - - t->t[1] = *g; - for (j = 1; j <= 64; j <<= 1) - gf128mul_x_bbe(&t->t[j + j], &t->t[j]); - - for (j = 2; j < 256; j += j) - for (k = 1; k < j; ++k) - be128_xor(&t->t[j + k], &t->t[j], &t->t[k]); - -out: - return t; -} -EXPORT_SYMBOL(gf128mul_init_4k_bbe); - -void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t) -{ - u8 *ap = (u8 *)a; - be128 r[1]; - int i = 15; - - *r = t->t[ap[15]]; - while (i--) { - gf128mul_x8_lle(r); - be128_xor(r, r, &t->t[ap[i]]); - } - *a = *r; -} -EXPORT_SYMBOL(gf128mul_4k_lle); - -void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t) -{ - u8 *ap = (u8 *)a; - be128 r[1]; - int i = 0; - - *r = t->t[ap[0]]; - while (++i < 16) { - gf128mul_x8_bbe(r); - be128_xor(r, r, &t->t[ap[i]]); - } - *a = *r; -} -EXPORT_SYMBOL(gf128mul_4k_bbe); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)"); diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index c70d163c1ac9..e5803c249c12 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -34,14 +34,14 @@ * (https://csrc.nist.gov/publications/detail/sp/800-38d/final) */ -#include <crypto/algapi.h> #include <crypto/gf128mul.h> #include <crypto/ghash.h> #include <crypto/internal/hash.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/utils.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> static int ghash_init(struct shash_desc *desc) { @@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc, struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; - if (dctx->bytes) { - int n = min(srclen, dctx->bytes); - u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos++ ^= *src++; - - if (!dctx->bytes) - gf128mul_4k_lle((be128 *)dst, ctx->gf128); - } - - while (srclen >= GHASH_BLOCK_SIZE) { + do { crypto_xor(dst, src, GHASH_BLOCK_SIZE); gf128mul_4k_lle((be128 *)dst, ctx->gf128); src += GHASH_BLOCK_SIZE; srclen -= GHASH_BLOCK_SIZE; - } - - if (srclen) { - dctx->bytes = GHASH_BLOCK_SIZE - srclen; - while (srclen--) - *dst++ ^= *src++; - } + } while (srclen >= GHASH_BLOCK_SIZE); - return 0; + return srclen; } -static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) +static void ghash_flush(struct shash_desc *desc, const u8 *src, + unsigned int len) { + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); u8 *dst = dctx->buffer; - if (dctx->bytes) { - u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - while (dctx->bytes--) - *tmp++ ^= 0; - + if (len) { + crypto_xor(dst, src, len); gf128mul_4k_lle((be128 *)dst, ctx->gf128); } - - dctx->bytes = 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - 
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; - ghash_flush(ctx, dctx); + ghash_flush(desc, src, len); memcpy(dst, buf, GHASH_BLOCK_SIZE); return 0; @@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, @@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -subsys_initcall(ghash_mod_init); +module_init(ghash_mod_init); module_exit(ghash_mod_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/hash.h b/crypto/hash.h new file mode 100644 index 000000000000..cf9aee07f77d --- /dev/null +++ b/crypto/hash.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _LOCAL_CRYPTO_HASH_H +#define _LOCAL_CRYPTO_HASH_H + +#include <crypto/internal/hash.h> + +#include "internal.h" + +extern const struct crypto_type crypto_shash_type; + +int hash_prepare_alg(struct hash_alg_common *alg); + +#endif /* _LOCAL_CRYPTO_HASH_H */ diff --git a/crypto/hash_info.c b/crypto/hash_info.c deleted file mode 100644 index a49ff96bde77..000000000000 --- a/crypto/hash_info.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Hash Info: Hash algorithms information - * - * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> - */ - -#include <linux/export.h> -#include <crypto/hash_info.h> - -const char *const hash_algo_name[HASH_ALGO__LAST] = { - [HASH_ALGO_MD4] = "md4", - [HASH_ALGO_MD5] = "md5", - [HASH_ALGO_SHA1] = "sha1", - [HASH_ALGO_RIPE_MD_160] = "rmd160", - [HASH_ALGO_SHA256] = "sha256", - [HASH_ALGO_SHA384] = "sha384", - [HASH_ALGO_SHA512] = "sha512", - [HASH_ALGO_SHA224] = "sha224", - [HASH_ALGO_RIPE_MD_128] = "rmd128", - [HASH_ALGO_RIPE_MD_256] = "rmd256", - [HASH_ALGO_RIPE_MD_320] = "rmd320", - [HASH_ALGO_WP_256] = "wp256", - [HASH_ALGO_WP_384] = "wp384", - [HASH_ALGO_WP_512] = "wp512", - [HASH_ALGO_TGR_128] = "tgr128", - [HASH_ALGO_TGR_160] = "tgr160", - [HASH_ALGO_TGR_192] = "tgr192", - [HASH_ALGO_SM3_256] = "sm3", - [HASH_ALGO_STREEBOG_256] = "streebog256", - [HASH_ALGO_STREEBOG_512] = "streebog512", -}; -EXPORT_SYMBOL_GPL(hash_algo_name); - -const int hash_digest_size[HASH_ALGO__LAST] = { - [HASH_ALGO_MD4] = MD5_DIGEST_SIZE, - [HASH_ALGO_MD5] = MD5_DIGEST_SIZE, - [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE, - [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE, - [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE, - [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE, - [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE, - [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE, - [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE, - [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE, - [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE, - [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE, - [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE, - [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE, - [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE, - [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE, - [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE, - [HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE, - [HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE, - [HASH_ALGO_STREEBOG_512] = 
STREEBOG512_DIGEST_SIZE, -}; -EXPORT_SYMBOL_GPL(hash_digest_size); diff --git a/crypto/hctr2.c b/crypto/hctr2.c new file mode 100644 index 000000000000..f4cd6c29b4d3 --- /dev/null +++ b/crypto/hctr2.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HCTR2 length-preserving encryption mode + * + * Copyright 2021 Google LLC + */ + + +/* + * HCTR2 is a length-preserving encryption mode that is efficient on + * processors with instructions to accelerate AES and carryless + * multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM + * processors with the ARMv8 crypto extensions. + * + * For more details, see the paper: "Length-preserving encryption with HCTR2" + * (https://eprint.iacr.org/2021/1441.pdf) + */ + +#include <crypto/internal/cipher.h> +#include <crypto/internal/skcipher.h> +#include <crypto/polyval.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> + +#define BLOCKCIPHER_BLOCK_SIZE 16 + +/* + * The specification allows variable-length tweaks, but Linux's crypto API + * currently only allows algorithms to support a single length. The "natural" + * tweak length for HCTR2 is 16, since that fits into one POLYVAL block for + * the best performance. But longer tweaks are useful for fscrypt, to avoid + * needing to derive per-file keys. So instead we use two blocks, or 32 bytes. + */ +#define TWEAK_SIZE 32 + +struct hctr2_instance_ctx { + struct crypto_cipher_spawn blockcipher_spawn; + struct crypto_skcipher_spawn xctr_spawn; +}; + +struct hctr2_tfm_ctx { + struct crypto_cipher *blockcipher; + struct crypto_skcipher *xctr; + struct polyval_key poly_key; + struct polyval_elem hashed_tweaklens[2]; + u8 L[BLOCKCIPHER_BLOCK_SIZE]; +}; + +struct hctr2_request_ctx { + u8 first_block[BLOCKCIPHER_BLOCK_SIZE]; + u8 xctr_iv[BLOCKCIPHER_BLOCK_SIZE]; + struct scatterlist *bulk_part_dst; + struct scatterlist *bulk_part_src; + struct scatterlist sg_src[2]; + struct scatterlist sg_dst[2]; + struct polyval_elem hashed_tweak; + /* + * skcipher sub-request size is unknown at compile-time, so it needs to + * go after the members with known sizes. + */ + union { + struct polyval_ctx poly_ctx; + struct skcipher_request xctr_req; + } u; +}; + +/* + * The input data for each HCTR2 hash step begins with a 16-byte block that + * contains the tweak length and a flag that indicates whether the input is evenly + * divisible into blocks. Since this implementation only supports one tweak + * length, we precompute the two hash states resulting from hashing the two + * possible values of this initial block. This reduces by one block the amount of + * data that needs to be hashed for each encryption/decryption + * + * These precomputed hashes are stored in hctr2_tfm_ctx. 
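Concretely (reading off hctr2_hash_tweaklens below), with TWEAK_SIZE fixed at 32 the initial block can take only two values, so both resulting hash states are cheap to precompute at setkey time:

        /* tweak_length_block = le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder)
         *                    = le64(32 * 8 * 2 + 2 + {0,1})
         *                    = le64(514) or le64(515),
         * zero-padded out to the 16-byte POLYVAL block size. */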
+ */ +static void hctr2_hash_tweaklens(struct hctr2_tfm_ctx *tctx) +{ + struct polyval_ctx ctx; + + for (int has_remainder = 0; has_remainder < 2; has_remainder++) { + const __le64 tweak_length_block[2] = { + cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder), + }; + + polyval_init(&ctx, &tctx->poly_key); + polyval_update(&ctx, (const u8 *)&tweak_length_block, + sizeof(tweak_length_block)); + static_assert(sizeof(tweak_length_block) == POLYVAL_BLOCK_SIZE); + polyval_export_blkaligned( + &ctx, &tctx->hashed_tweaklens[has_remainder]); + } + memzero_explicit(&ctx, sizeof(ctx)); +} + +static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + u8 hbar[BLOCKCIPHER_BLOCK_SIZE]; + int err; + + crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(tctx->blockcipher, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(tctx->blockcipher, key, keylen); + if (err) + return err; + + crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(tctx->xctr, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(tctx->xctr, key, keylen); + if (err) + return err; + + memset(hbar, 0, sizeof(hbar)); + crypto_cipher_encrypt_one(tctx->blockcipher, hbar, hbar); + + memset(tctx->L, 0, sizeof(tctx->L)); + tctx->L[0] = 0x01; + crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L); + + static_assert(sizeof(hbar) == POLYVAL_BLOCK_SIZE); + polyval_preparekey(&tctx->poly_key, hbar); + memzero_explicit(hbar, sizeof(hbar)); + + hctr2_hash_tweaklens(tctx); + return 0; +} + +static void hctr2_hash_tweak(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx; + bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE; + + polyval_import_blkaligned(poly_ctx, &tctx->poly_key, + &tctx->hashed_tweaklens[has_remainder]); + polyval_update(poly_ctx, req->iv, TWEAK_SIZE); + + // Store the hashed tweak, since we need it when computing both + // H(T || N) and H(T || V). 
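        // For reference, the dataflow these two hash invocations feed into
        // (per the HCTR2 paper cited in the header; M || N is the plaintext
        // split, U || V the ciphertext split):
        //     MM = M ^ H(T || N)      UU = E(MM)
        //     S  = MM ^ UU ^ L        V  = XCTR(S, N)
        //     U  = UU ^ H(T || V)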
+ static_assert(TWEAK_SIZE % POLYVAL_BLOCK_SIZE == 0); + polyval_export_blkaligned(poly_ctx, &rctx->hashed_tweak); +} + +static void hctr2_hash_message(struct skcipher_request *req, + struct scatterlist *sgl, + u8 digest[POLYVAL_DIGEST_SIZE]) +{ + static const u8 padding = 0x1; + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx; + const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct sg_mapping_iter miter; + int i; + int n = 0; + + sg_miter_start(&miter, sgl, sg_nents(sgl), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + for (i = 0; i < bulk_len; i += n) { + sg_miter_next(&miter); + n = min_t(unsigned int, miter.length, bulk_len - i); + polyval_update(poly_ctx, miter.addr, n); + } + sg_miter_stop(&miter); + + if (req->cryptlen % BLOCKCIPHER_BLOCK_SIZE) + polyval_update(poly_ctx, &padding, 1); + polyval_final(poly_ctx, digest); +} + +static int hctr2_finish(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx; + u8 digest[POLYVAL_DIGEST_SIZE]; + + // U = UU ^ H(T || V) + // or M = MM ^ H(T || N) + polyval_import_blkaligned(poly_ctx, &tctx->poly_key, + &rctx->hashed_tweak); + hctr2_hash_message(req, rctx->bulk_part_dst, digest); + crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE); + + // Copy U (or M) into dst scatterlist + scatterwalk_map_and_copy(rctx->first_block, req->dst, + 0, BLOCKCIPHER_BLOCK_SIZE, 1); + return 0; +} + +static void hctr2_xctr_done(void *data, int err) +{ + struct skcipher_request *req = data; + + if (!err) + err = hctr2_finish(req); + + skcipher_request_complete(req, err); +} + +static int hctr2_crypt(struct skcipher_request *req, bool enc) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + u8 digest[POLYVAL_DIGEST_SIZE]; + int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + + // Requests must be at least one block + if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE) + return -EINVAL; + + // Copy M (or U) into a temporary buffer + scatterwalk_map_and_copy(rctx->first_block, req->src, + 0, BLOCKCIPHER_BLOCK_SIZE, 0); + + // Create scatterlists for N and V + rctx->bulk_part_src = scatterwalk_ffwd(rctx->sg_src, req->src, + BLOCKCIPHER_BLOCK_SIZE); + rctx->bulk_part_dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, + BLOCKCIPHER_BLOCK_SIZE); + + // MM = M ^ H(T || N) + // or UU = U ^ H(T || V) + hctr2_hash_tweak(req); + hctr2_hash_message(req, rctx->bulk_part_src, digest); + crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE); + + // UU = E(MM) + // or MM = D(UU) + if (enc) + crypto_cipher_encrypt_one(tctx->blockcipher, rctx->first_block, + digest); + else + crypto_cipher_decrypt_one(tctx->blockcipher, rctx->first_block, + digest); + + // S = MM ^ UU ^ L + crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE); + crypto_xor_cpy(rctx->xctr_iv, digest, tctx->L, BLOCKCIPHER_BLOCK_SIZE); + + // V = XCTR(S, N) + // or N = XCTR(S, V) + skcipher_request_set_tfm(&rctx->u.xctr_req, tctx->xctr); + skcipher_request_set_crypt(&rctx->u.xctr_req, rctx->bulk_part_src, + rctx->bulk_part_dst, bulk_len, + rctx->xctr_iv); + skcipher_request_set_callback(&rctx->u.xctr_req, + req->base.flags, + hctr2_xctr_done, req); + return 
crypto_skcipher_encrypt(&rctx->u.xctr_req) ?: + hctr2_finish(req); +} + +static int hctr2_encrypt(struct skcipher_request *req) +{ + return hctr2_crypt(req, true); +} + +static int hctr2_decrypt(struct skcipher_request *req) +{ + return hctr2_crypt(req, false); +} + +static int hctr2_init_tfm(struct crypto_skcipher *tfm) +{ + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst); + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *xctr; + struct crypto_cipher *blockcipher; + int err; + + xctr = crypto_spawn_skcipher(&ictx->xctr_spawn); + if (IS_ERR(xctr)) + return PTR_ERR(xctr); + + blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn); + if (IS_ERR(blockcipher)) { + err = PTR_ERR(blockcipher); + goto err_free_xctr; + } + + tctx->xctr = xctr; + tctx->blockcipher = blockcipher; + + BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) != + sizeof(struct hctr2_request_ctx)); + crypto_skcipher_set_reqsize( + tfm, max(sizeof(struct hctr2_request_ctx), + offsetofend(struct hctr2_request_ctx, u.xctr_req) + + crypto_skcipher_reqsize(xctr))); + return 0; + +err_free_xctr: + crypto_free_skcipher(xctr); + return err; +} + +static void hctr2_exit_tfm(struct crypto_skcipher *tfm) +{ + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + + crypto_free_cipher(tctx->blockcipher); + crypto_free_skcipher(tctx->xctr); +} + +static void hctr2_free_instance(struct skcipher_instance *inst) +{ + struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_cipher(&ictx->blockcipher_spawn); + crypto_drop_skcipher(&ictx->xctr_spawn); + kfree(inst); +} + +static int hctr2_create_common(struct crypto_template *tmpl, struct rtattr **tb, + const char *xctr_name) +{ + struct skcipher_alg_common *xctr_alg; + u32 mask; + struct skcipher_instance *inst; + struct hctr2_instance_ctx *ictx; + struct crypto_alg *blockcipher_alg; + char blockcipher_name[CRYPTO_MAX_ALG_NAME]; + int len; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + ictx = skcipher_instance_ctx(inst); + + /* Stream cipher, xctr(block_cipher) */ + err = crypto_grab_skcipher(&ictx->xctr_spawn, + skcipher_crypto_instance(inst), + xctr_name, 0, mask); + if (err) + goto err_free_inst; + xctr_alg = crypto_spawn_skcipher_alg_common(&ictx->xctr_spawn); + + err = -EINVAL; + if (strncmp(xctr_alg->base.cra_name, "xctr(", 5)) + goto err_free_inst; + len = strscpy(blockcipher_name, xctr_alg->base.cra_name + 5, + sizeof(blockcipher_name)); + if (len < 1) + goto err_free_inst; + if (blockcipher_name[len - 1] != ')') + goto err_free_inst; + blockcipher_name[len - 1] = 0; + + /* Block cipher, e.g. 
"aes" */ + err = crypto_grab_cipher(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst), + blockcipher_name, 0, mask); + if (err) + goto err_free_inst; + blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn); + + /* Require blocksize of 16 bytes */ + err = -EINVAL; + if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE) + goto err_free_inst; + + /* Instance fields */ + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "hctr2(%s)", + blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "hctr2_base(%s,polyval-lib)", + xctr_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; + inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx); + inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask; + inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority + + blockcipher_alg->cra_priority) / + 3; + + inst->alg.setkey = hctr2_setkey; + inst->alg.encrypt = hctr2_encrypt; + inst->alg.decrypt = hctr2_decrypt; + inst->alg.init = hctr2_init_tfm; + inst->alg.exit = hctr2_exit_tfm; + inst->alg.min_keysize = xctr_alg->min_keysize; + inst->alg.max_keysize = xctr_alg->max_keysize; + inst->alg.ivsize = TWEAK_SIZE; + + inst->free = hctr2_free_instance; + + err = skcipher_register_instance(tmpl, inst); + if (err) { +err_free_inst: + hctr2_free_instance(inst); + } + return err; +} + +static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *xctr_name; + const char *polyval_name; + + xctr_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(xctr_name)) + return PTR_ERR(xctr_name); + + polyval_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(polyval_name)) + return PTR_ERR(polyval_name); + if (strcmp(polyval_name, "polyval") != 0 && + strcmp(polyval_name, "polyval-lib") != 0) + return -ENOENT; + + return hctr2_create_common(tmpl, tb, xctr_name); +} + +static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *blockcipher_name; + char xctr_name[CRYPTO_MAX_ALG_NAME]; + + blockcipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(blockcipher_name)) + return PTR_ERR(blockcipher_name); + + if (snprintf(xctr_name, CRYPTO_MAX_ALG_NAME, "xctr(%s)", + blockcipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return hctr2_create_common(tmpl, tb, xctr_name); +} + +static struct crypto_template hctr2_tmpls[] = { + { + /* hctr2_base(xctr_name, polyval_name) */ + .name = "hctr2_base", + .create = hctr2_create_base, + .module = THIS_MODULE, + }, { + /* hctr2(blockcipher_name) */ + .name = "hctr2", + .create = hctr2_create, + .module = THIS_MODULE, + } +}; + +static int __init hctr2_module_init(void) +{ + return crypto_register_templates(hctr2_tmpls, ARRAY_SIZE(hctr2_tmpls)); +} + +static void __exit hctr2_module_exit(void) +{ + return crypto_unregister_templates(hctr2_tmpls, + ARRAY_SIZE(hctr2_tmpls)); +} + +module_init(hctr2_module_init); +module_exit(hctr2_module_exit); + +MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("hctr2"); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/hkdf.c b/crypto/hkdf.c new file mode 100644 index 000000000000..82d1b32ca6ce --- /dev/null +++ b/crypto/hkdf.c @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation + * Function"), aka 
RFC 5869. See also the original paper (Krawczyk 2010): + * "Cryptographic Extraction and Key Derivation: The HKDF Scheme". + * + * Copyright 2019 Google LLC + */ + +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <crypto/hkdf.h> +#include <linux/module.h> + +/* + * HKDF consists of two steps: + * + * 1. HKDF-Extract: extract a pseudorandom key from the input keying material + * and optional salt. + * 2. HKDF-Expand: expand the pseudorandom key into output keying material of + * any length, parameterized by an application-specific info string. + * + */ + +/** + * hkdf_extract - HKDF-Extract (RFC 5869 section 2.2) + * @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The + * caller is responsible for setting the @prk afterwards. + * @ikm: input keying material + * @ikmlen: length of @ikm + * @salt: input salt value + * @saltlen: length of @salt + * @prk: resulting pseudorandom key + * + * Extracts a pseudorandom key @prk from the input keying material + * @ikm with length @ikmlen and salt @salt with length @saltlen. + * The length of @prk is given by the digest size of @hmac_tfm. + * For an 'unsalted' version of HKDF-Extract @salt must be set + * to all zeroes and @saltlen must be set to the length of @prk. + * + * Returns 0 on success with the pseudorandom key stored in @prk, + * or a negative errno value otherwise. + */ +int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm, + unsigned int ikmlen, const u8 *salt, unsigned int saltlen, + u8 *prk) +{ + int err; + + err = crypto_shash_setkey(hmac_tfm, salt, saltlen); + if (!err) + err = crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk); + + return err; +} +EXPORT_SYMBOL_GPL(hkdf_extract); + +/** + * hkdf_expand - HKDF-Expand (RFC 5869 section 2.3) + * @hmac_tfm: hash context keyed with pseudorandom key + * @info: application-specific information + * @infolen: length of @info + * @okm: output keying material + * @okmlen: length of @okm + * + * This expands the pseudorandom key, which was already keyed into @hmac_tfm, + * into @okmlen bytes of output keying material parameterized by the + * application-specific @info of length @infolen bytes. + * This is thread-safe and may be called by multiple threads in parallel. + * + * Returns 0 on success with output keying material stored in @okm, + * or a negative errno value otherwise. 
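Taken together, the two helpers give the usual extract-then-expand flow. A minimal caller sketch (hypothetical variables ikm/salt/info and their lengths; note the explicit setkey of the PRK between the two steps, which the hkdf_extract comment above leaves to the caller):

        struct crypto_shash *hmac = crypto_alloc_shash("hmac(sha256)", 0, 0);
        u8 prk[SHA256_DIGEST_SIZE], okm[32];
        int err = PTR_ERR_OR_ZERO(hmac);

        if (!err)
                err = hkdf_extract(hmac, ikm, ikmlen, salt, saltlen, prk);
        if (!err)
                err = crypto_shash_setkey(hmac, prk, sizeof(prk));
        if (!err)
                err = hkdf_expand(hmac, info, infolen, okm, sizeof(okm));
        if (!IS_ERR(hmac))
                crypto_free_shash(hmac);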
+ */ +int hkdf_expand(struct crypto_shash *hmac_tfm, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen) +{ + SHASH_DESC_ON_STACK(desc, hmac_tfm); + unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm); + int err; + const u8 *prev = NULL; + u8 counter = 1; + u8 tmp[HASH_MAX_DIGESTSIZE] = {}; + + if (WARN_ON(okmlen > 255 * hashlen)) + return -EINVAL; + + desc->tfm = hmac_tfm; + + for (i = 0; i < okmlen; i += hashlen) { + err = crypto_shash_init(desc); + if (err) + goto out; + + if (prev) { + err = crypto_shash_update(desc, prev, hashlen); + if (err) + goto out; + } + + if (infolen) { + err = crypto_shash_update(desc, info, infolen); + if (err) + goto out; + } + + BUILD_BUG_ON(sizeof(counter) != 1); + if (okmlen - i < hashlen) { + err = crypto_shash_finup(desc, &counter, 1, tmp); + if (err) + goto out; + memcpy(&okm[i], tmp, okmlen - i); + memzero_explicit(tmp, sizeof(tmp)); + } else { + err = crypto_shash_finup(desc, &counter, 1, &okm[i]); + if (err) + goto out; + } + counter++; + prev = &okm[i]; + } + err = 0; +out: + if (unlikely(err)) + memzero_explicit(okm, okmlen); /* so caller doesn't need to */ + shash_desc_zero(desc); + memzero_explicit(tmp, HASH_MAX_DIGESTSIZE); + return err; +} +EXPORT_SYMBOL_GPL(hkdf_expand); + +struct hkdf_testvec { + const char *test; + const u8 *ikm; + const u8 *salt; + const u8 *info; + const u8 *prk; + const u8 *okm; + u16 ikm_size; + u16 salt_size; + u16 info_size; + u16 prk_size; + u16 okm_size; +}; + +/* + * HKDF test vectors from RFC5869 + * + * Additional HKDF test vectors from + * https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md + */ +static const struct hkdf_testvec hkdf_sha256_tv[] = { + { + .test = "basic hdkf test", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63" + "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5", + .prk_size = 32, + .okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a" + "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf" + "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65", + .okm_size = 42, + }, { + .test = "hkdf test with long input", + .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f", + .ikm_size = 80, + .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf", + .salt_size = 80, + .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" + 
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .info_size = 80, + .prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c" + "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44", + .prk_size = 32, + .okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34" + "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c" + "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09" + "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71" + "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f" + "\x1d\x87", + .okm_size = 82, + }, { + .test = "hkdf test with zero salt and info", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = NULL, + .salt_size = 0, + .info = NULL, + .info_size = 0, + .prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf" + "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04", + .prk_size = 32, + .okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31" + "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d" + "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8", + .okm_size = 42, + }, { + .test = "hkdf test with short input", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 11, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c" + "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf", + .prk_size = 32, + .okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43" + "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d" + "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5", + .okm_size = 42, + }, { + .test = "unsalted hkdf test with zero info", + .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c" + "\x0c\x0c\x0c\x0c\x0c\x0c", + .ikm_size = 22, + .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + .salt_size = 32, + .info = NULL, + .info_size = 0, + .prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d" + "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf", + .prk_size = 32, + .okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53" + "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a" + "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7", + .okm_size = 42, + } +}; + +static const struct hkdf_testvec hkdf_sha384_tv[] = { + { + .test = "basic hkdf test", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30" + "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde" + "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde", + .prk_size = 48, + .okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38" + "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7" + "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5", + .okm_size = 42, + }, { + .test = "hkdf test with long input", + .ikm = 
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f", + .ikm_size = 80, + .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf", + .salt_size = 80, + .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .info_size = 80, + .prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3" + "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b" + "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb", + .prk_size = 48, + .okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81" + "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee" + "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49" + "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6" + "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a" + "\x8b\x2e", + .okm_size = 82, + }, { + .test = "hkdf test with zero salt and info", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = NULL, + .salt_size = 0, + .info = NULL, + .info_size = 0, + .prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d" + "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b" + "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5", + .prk_size = 48, + .okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf" + "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa" + "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c", + .okm_size = 42, + }, { + .test = "hkdf test with short input", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 11, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0" + "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a" + "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72", + .prk_size = 48, + .okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75" + "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1" + "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a", + .okm_size = 42, + }, { + .test = "unsalted hkdf test with zero info", + .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c" + "\x0c\x0c\x0c\x0c\x0c\x0c", + .ikm_size = 22, + .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + .salt_size = 48, + 
.info = NULL, + .info_size = 0, + .prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e" + "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc" + "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5", + .prk_size = 48, + .okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78" + "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b" + "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53", + .okm_size = 42, + } +}; + +static const struct hkdf_testvec hkdf_sha512_tv[] = { + { + .test = "basic hkdf test", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b" + "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26" + "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f" + "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37", + .prk_size = 64, + .okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4" + "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93" + "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb", + .okm_size = 42, + }, { + .test = "hkdf test with long input", + .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f", + .ikm_size = 80, + .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf", + .salt_size = 80, + .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .info_size = 80, + .prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d" + "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e" + "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe" + "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a", + .prk_size = 64, + .okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67" + "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1" + "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a" + "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35" + "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e" + "\x7a\x93", + .okm_size = 82, + }, { + .test = "hkdf test with zero salt and info", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = NULL, + .salt_size = 0, + .info = NULL, + .info_size = 0, + .prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21" + 
"\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b" + "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde" + "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6", + .prk_size = 64, + .okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c" + "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f" + "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac", + .okm_size = 42, + }, { + .test = "hkdf test with short input", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 11, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f" + "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f" + "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2" + "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d", + .prk_size = 64, + .okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff" + "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35" + "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68", + .okm_size = 42, + }, { + .test = "unsalted hkdf test with zero info", + .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c" + "\x0c\x0c\x0c\x0c\x0c\x0c", + .ikm_size = 22, + .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + .salt_size = 64, + .info = NULL, + .info_size = 0, + .prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89" + "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa" + "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1" + "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c", + .prk_size = 64, + .okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90" + "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a" + "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb", + .okm_size = 42, + } +}; + +static int hkdf_test(const char *shash, const struct hkdf_testvec *tv) +{ struct crypto_shash *tfm = NULL; + u8 *prk = NULL, *okm = NULL; + unsigned int prk_size; + const char *driver; + int err; + + tfm = crypto_alloc_shash(shash, 0, 0); + if (IS_ERR(tfm)) { + pr_err("%s(%s): failed to allocate transform: %ld\n", + tv->test, shash, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + driver = crypto_shash_driver_name(tfm); + + prk_size = crypto_shash_digestsize(tfm); + prk = kzalloc(prk_size, GFP_KERNEL); + if (!prk) { + err = -ENOMEM; + goto out_free; + } + + if (tv->prk_size != prk_size) { + pr_err("%s(%s): prk size mismatch (vec %u, digest %u\n", + tv->test, driver, tv->prk_size, prk_size); + err = -EINVAL; + goto out_free; + } + + err = hkdf_extract(tfm, tv->ikm, tv->ikm_size, + tv->salt, tv->salt_size, prk); + if (err) { + pr_err("%s(%s): hkdf_extract failed with %d\n", + tv->test, driver, err); + goto out_free; + } + + if (memcmp(prk, tv->prk, tv->prk_size)) { + pr_err("%s(%s): hkdf_extract prk mismatch\n", + tv->test, driver); + print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE, + 16, 1, prk, tv->prk_size, false); + err = -EINVAL; + goto out_free; + } + + okm = kzalloc(tv->okm_size, GFP_KERNEL); + if (!okm) { + err = -ENOMEM; + goto out_free; + } + + err = 
crypto_shash_setkey(tfm, tv->prk, tv->prk_size); + if (err) { + pr_err("%s(%s): failed to set prk, error %d\n", + tv->test, driver, err); + goto out_free; + } + + err = hkdf_expand(tfm, tv->info, tv->info_size, + okm, tv->okm_size); + if (err) { + pr_err("%s(%s): hkdf_expand() failed with %d\n", + tv->test, driver, err); + } else if (memcmp(okm, tv->okm, tv->okm_size)) { + pr_err("%s(%s): hkdf_expand() okm mismatch\n", + tv->test, driver); + print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE, + 16, 1, okm, tv->okm_size, false); + err = -EINVAL; + } +out_free: + kfree(okm); + kfree(prk); + crypto_free_shash(tfm); + return err; +} + +static int __init crypto_hkdf_module_init(void) +{ + int ret = 0, i; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) + return 0; + + for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) { + ret = hkdf_test("hmac(sha256)", &hkdf_sha256_tv[i]); + if (ret) + return ret; + } + for (i = 0; i < ARRAY_SIZE(hkdf_sha384_tv); i++) { + ret = hkdf_test("hmac(sha384)", &hkdf_sha384_tv[i]); + if (ret) + return ret; + } + for (i = 0; i < ARRAY_SIZE(hkdf_sha512_tv); i++) { + ret = hkdf_test("hmac(sha512)", &hkdf_sha512_tv[i]); + if (ret) + return ret; + } + return 0; +} + +static void __exit crypto_hkdf_module_exit(void) {} + +late_initcall(crypto_hkdf_module_init); +module_exit(crypto_hkdf_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)"); diff --git a/crypto/hmac.c b/crypto/hmac.c index 25856aa7ccbf..148af460ae97 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -13,29 +13,24 @@ #include <crypto/hmac.h> #include <crypto/internal/hash.h> -#include <crypto/scatterwalk.h> #include <linux/err.h> -#include <linux/init.h> +#include <linux/fips.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/string.h> struct hmac_ctx { struct crypto_shash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; }; -static inline void *align_ptr(void *p, unsigned int align) -{ - return (void *)ALIGN((unsigned long)p, align); -} - -static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) -{ - return align_ptr(crypto_shash_ctx_aligned(tfm) + - crypto_shash_statesize(tfm) * 2, - crypto_tfm_ctx_alignment()); -} +struct ahash_hmac_ctx { + struct crypto_ahash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; +}; static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) @@ -43,13 +38,15 @@ static int hmac_setkey(struct crypto_shash *parent, int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *ipad = crypto_shash_ctx_aligned(parent); - char *opad = ipad + ss; - struct hmac_ctx *ctx = align_ptr(opad + ss, - crypto_tfm_ctx_alignment()); - struct crypto_shash *hash = ctx->hash; + struct hmac_ctx *tctx = crypto_shash_ctx(parent); + struct crypto_shash *hash = tctx->hash; + u8 *ipad = &tctx->pads[0]; + u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); - unsigned int i; + int err, i; + + if (fips_enabled && (keylen < 112 / 8)) + return -EINVAL; shash->tfm = hash; @@ -72,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent, opad[i] ^= HMAC_OPAD_VALUE; } - return crypto_shash_init(shash) ?: - crypto_shash_update(shash, ipad, bs) ?: - crypto_shash_export(shash, ipad) ?: - crypto_shash_init(shash) ?: - crypto_shash_update(shash, opad, bs) ?: - crypto_shash_export(shash, opad); + err = 
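The hunks above elide the unchanged middle of hmac_setkey(); reconstructed for context (this part is not in the diff), what sits between the FIPS key-length check and the export chain is the standard HMAC key schedule: an over-long key is digested down first, the result is zero-padded to one block, and the two pads differ from it only by fixed XOR constants.

/* Standard schedule; HMAC_IPAD_VALUE is 0x36, HMAC_OPAD_VALUE is 0x5c. */
if (keylen > bs) {
	err = crypto_shash_digest(shash, inkey, keylen, ipad); /* K' = H(K) */
	if (err)
		return err;
	keylen = ds;
} else {
	memcpy(ipad, inkey, keylen);
}
memset(ipad + keylen, 0, bs - keylen);	/* pad K' to a full block */
memcpy(opad, ipad, bs);
for (i = 0; i < bs; i++) {
	ipad[i] ^= HMAC_IPAD_VALUE;
	opad[i] ^= HMAC_OPAD_VALUE;
}

The init/update/export chain that follows then caches the one-block hash midstates of both pads in tctx->pads[], so keying happens once per setkey rather than once per message.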
crypto_shash_init(shash) ?: + crypto_shash_update(shash, ipad, bs) ?: + crypto_shash_export(shash, ipad) ?: + crypto_shash_init(shash) ?: + crypto_shash_update(shash, opad, bs) ?: + crypto_shash_export(shash, opad); + shash_desc_zero(shash); + return err; } static int hmac_export(struct shash_desc *pdesc, void *out) @@ -90,37 +89,42 @@ static int hmac_export(struct shash_desc *pdesc, void *out) static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); - struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); - desc->tfm = ctx->hash; + desc->tfm = tctx->hash; return crypto_shash_import(desc, in); } -static int hmac_init(struct shash_desc *pdesc) +static int hmac_export_core(struct shash_desc *pdesc, void *out) { - return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); + struct shash_desc *desc = shash_desc_ctx(pdesc); + + return crypto_shash_export_core(desc, out); } -static int hmac_update(struct shash_desc *pdesc, - const u8 *data, unsigned int nbytes) +static int hmac_import_core(struct shash_desc *pdesc, const void *in) { + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); struct shash_desc *desc = shash_desc_ctx(pdesc); - return crypto_shash_update(desc, data, nbytes); + desc->tfm = tctx->hash; + return crypto_shash_import_core(desc, in); } -static int hmac_final(struct shash_desc *pdesc, u8 *out) +static int hmac_init(struct shash_desc *pdesc) +{ + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); + + return hmac_import(pdesc, &tctx->pads[0]); +} + +static int hmac_update(struct shash_desc *pdesc, + const u8 *data, unsigned int nbytes) { - struct crypto_shash *parent = pdesc->tfm; - int ds = crypto_shash_digestsize(parent); - int ss = crypto_shash_statesize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); - return crypto_shash_final(desc, out) ?: - crypto_shash_import(desc, opad) ?: - crypto_shash_finup(desc, out, ds, out); + return crypto_shash_update(desc, data, nbytes); } static int hmac_finup(struct shash_desc *pdesc, const u8 *data, @@ -130,7 +134,8 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data, struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + ss; + const struct hmac_ctx *tctx = crypto_shash_ctx(parent); + const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_finup(desc, data, nbytes, out) ?: @@ -143,45 +148,54 @@ static int hmac_init_tfm(struct crypto_shash *parent) struct crypto_shash *hash; struct shash_instance *inst = shash_alg_instance(parent); struct crypto_shash_spawn *spawn = shash_instance_ctx(inst); - struct hmac_ctx *ctx = hmac_ctx(parent); + struct hmac_ctx *tctx = crypto_shash_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); - parent->descsize = sizeof(struct shash_desc) + - crypto_shash_descsize(hash); + tctx->hash = hash; + return 0; +} - ctx->hash = hash; +static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) +{ + struct hmac_ctx *sctx = crypto_shash_ctx(src); + struct hmac_ctx *dctx = crypto_shash_ctx(dst); + struct crypto_shash *hash; + + hash = crypto_clone_shash(sctx->hash); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + dctx->hash = hash; return 0; } static void hmac_exit_tfm(struct crypto_shash *parent) { 
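Taken together, hmac_init() and hmac_finup() above implement the HMAC composition from those cached midstates; the cost model is worth spelling out (notation only, nothing here is new API):

/*
 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * Per message, with midstates cached by setkey:
 *   init:   import(state after hashing K' ^ ipad)   - no compression
 *   finup:  inner = H(ipad-state || m)              - the message pass
 *           import(state after hashing K' ^ opad)
 *           out = H(opad-state || inner)            - one extra block
 */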
- struct hmac_ctx *ctx = hmac_ctx(parent); - crypto_free_shash(ctx->hash); + struct hmac_ctx *tctx = crypto_shash_ctx(parent); + + crypto_free_shash(tctx->hash); } -static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +static int __hmac_create_shash(struct crypto_template *tmpl, + struct rtattr **tb, u32 mask) { struct shash_instance *inst; struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; - u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); - if (err) - return err; - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = shash_instance_ctx(inst); + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; err = crypto_grab_shash(spawn, shash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -200,29 +214,28 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) ss < alg->cra_blocksize) goto err_free_inst; - err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); + err = crypto_inst_setname(shash_crypto_instance(inst), "hmac", + "hmac-shash", alg); if (err) goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; + inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2); - ss = ALIGN(ss, alg->cra_alignmask + 1); inst->alg.digestsize = ds; inst->alg.statesize = ss; - - inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + - ALIGN(ss * 2, crypto_tfm_ctx_alignment()); - + inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize; inst->alg.init = hmac_init; inst->alg.update = hmac_update; - inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; + inst->alg.export_core = hmac_export_core; + inst->alg.import_core = hmac_import_core; inst->alg.setkey = hmac_setkey; inst->alg.init_tfm = hmac_init_tfm; + inst->alg.clone_tfm = hmac_clone_tfm; inst->alg.exit_tfm = hmac_exit_tfm; inst->free = shash_free_singlespawn_instance; @@ -235,23 +248,332 @@ err_free_inst: return err; } -static struct crypto_template hmac_tmpl = { - .name = "hmac", - .create = hmac_create, - .module = THIS_MODULE, +static int hmac_setkey_ahash(struct crypto_ahash *parent, + const u8 *inkey, unsigned int keylen) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash); + int ds = crypto_ahash_digestsize(parent); + int bs = crypto_ahash_blocksize(parent); + int ss = crypto_ahash_statesize(parent); + HASH_REQUEST_ON_STACK(req, fb); + u8 *opad = &tctx->pads[ss]; + u8 *ipad = &tctx->pads[0]; + int err, i; + + if (fips_enabled && (keylen < 112 / 8)) + return -EINVAL; + + ahash_request_set_callback(req, 0, NULL, NULL); + + if (keylen > bs) { + ahash_request_set_virt(req, inkey, ipad, keylen); + err = crypto_ahash_digest(req); + if (err) + goto out_zero_req; + + keylen = ds; + } else + memcpy(ipad, inkey, keylen); + + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + ahash_request_set_virt(req, ipad, NULL, bs); + err = crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, ipad); + + ahash_request_set_virt(req, opad, NULL, bs); + err = err ?: + crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, opad); + +out_zero_req: + 
HASH_REQUEST_ZERO(req); + return err; +} + +static int hmac_export_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export(ahash_request_ctx(preq), out); +} + +static int hmac_import_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import(req, in); +} + +static int hmac_export_core_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export_core(ahash_request_ctx(preq), out); +} + +static int hmac_import_core_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import_core(req, in); +} + +static int hmac_init_ahash(struct ahash_request *preq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + + return hmac_import_ahash(preq, &tctx->pads[0]); +} + +static int hmac_update_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + preq->base.complete, preq->base.data); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes); + return crypto_ahash_update(req); +} + +static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_request *req = ahash_request_ctx(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + int ds = crypto_ahash_digestsize(tfm); + int ss = crypto_ahash_statesize(tfm); + const u8 *opad = &tctx->pads[ss]; + + ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask, + preq->base.complete, preq->base.data); + ahash_request_set_virt(req, preq->result, preq->result, ds); + return crypto_ahash_import(req, opad) ?: + crypto_ahash_finup(req); + +} + +static void hmac_finup_done(void *data, int err) +{ + struct ahash_request *preq = data; + + if (err) + goto out; + + err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + ahash_request_complete(preq, err); +} + +static int hmac_finup_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + hmac_finup_done, preq); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, preq->result, + preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, preq->result, + preq->nbytes); + return crypto_ahash_finup(req) ?: + hmac_finup_finish(preq, 0); +} + +static int hmac_digest_ahash(struct ahash_request *preq) +{ + return hmac_init_ahash(preq) ?: + hmac_finup_ahash(preq); +} + +static int hmac_init_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_instance *inst = ahash_alg_instance(parent); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *hash; + + hash = crypto_spawn_ahash(ahash_instance_ctx(inst)); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) + + crypto_ahash_reqsize(hash)) + 
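Because the outer hash may complete asynchronously, hmac_finup_ahash() and hmac_finup_done() above drive the outer pass either inline or from the completion callback, with CRYPTO_TFM_REQ_MAY_SLEEP masked off in the callback path since completions can run in atomic context. A caller that wants synchronous semantics wraps the request in the stock wait helpers; a sketch, with tfm, sg, digest and len assumed already set up:

/* Synchronous caller sketch using the standard crypto wait helpers. */
DECLARE_CRYPTO_WAIT(wait);
struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);

ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			   crypto_req_done, &wait);
ahash_request_set_crypt(req, sg, digest, len);
err = crypto_wait_req(crypto_ahash_finup(req), &wait);
ahash_request_free(req);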
return -EINVAL; + + tctx->hash = hash; + return 0; +} + +static int hmac_clone_ahash_tfm(struct crypto_ahash *dst, + struct crypto_ahash *src) +{ + struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src); + struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst); + struct crypto_ahash *hash; + + hash = crypto_clone_ahash(sctx->hash); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + dctx->hash = hash; + return 0; +} + +static void hmac_exit_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + + crypto_free_ahash(tctx->hash); +} + +static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb, + u32 mask) +{ + struct crypto_ahash_spawn *spawn; + struct ahash_instance *inst; + struct crypto_alg *alg; + struct hash_alg_common *halg; + int ds, ss, err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = ahash_instance_ctx(inst); + + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + halg = crypto_spawn_ahash_alg(spawn); + alg = &halg->base; + + /* The underlying hash algorithm must not require a key */ + err = -EINVAL; + if (crypto_hash_alg_needs_key(halg)) + goto err_free_inst; + + ds = halg->digestsize; + ss = halg->statesize; + if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) + goto err_free_inst; + + err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg); + if (err) + goto err_free_inst; + + inst->alg.halg.base.cra_flags = alg->cra_flags & + CRYPTO_ALG_INHERITED_FLAGS; + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT; + inst->alg.halg.base.cra_priority = alg->cra_priority + 100; + inst->alg.halg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) + + (ss * 2); + inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) + + alg->cra_reqsize; + + inst->alg.halg.digestsize = ds; + inst->alg.halg.statesize = ss; + inst->alg.init = hmac_init_ahash; + inst->alg.update = hmac_update_ahash; + inst->alg.finup = hmac_finup_ahash; + inst->alg.digest = hmac_digest_ahash; + inst->alg.export = hmac_export_ahash; + inst->alg.import = hmac_import_ahash; + inst->alg.export_core = hmac_export_core_ahash; + inst->alg.import_core = hmac_import_core_ahash; + inst->alg.setkey = hmac_setkey_ahash; + inst->alg.init_tfm = hmac_init_ahash_tfm; + inst->alg.clone_tfm = hmac_clone_ahash_tfm; + inst->alg.exit_tfm = hmac_exit_ahash_tfm; + + inst->free = ahash_free_singlespawn_instance; + + err = ahash_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ahash_free_singlespawn_instance(inst); + } + return err; +} + +static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; + u32 mask; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + mask = crypto_algt_inherited_mask(algt); + + if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK)) + return hmac_create_ahash(tmpl, tb, mask); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK) + return -EINVAL; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); + if (err) + return err == -EINVAL ? 
-ENOENT : err; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static struct crypto_template hmac_tmpls[] = { + { + .name = "hmac", + .create = hmac_create, + .module = THIS_MODULE, + }, + { + .name = "hmac-shash", + .create = hmac_create_shash, + .module = THIS_MODULE, + }, }; static int __init hmac_module_init(void) { - return crypto_register_template(&hmac_tmpl); + return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } static void __exit hmac_module_exit(void) { - crypto_unregister_template(&hmac_tmpl); + crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } -subsys_initcall(hmac_module_init); +module_init(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/internal.h b/crypto/internal.h index f00869af689f..b9afd68767c1 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -10,12 +10,15 @@ #include <crypto/algapi.h> #include <linux/completion.h> +#include <linux/err.h> +#include <linux/jump_label.h> #include <linux/list.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/numa.h> #include <linux/refcount.h> #include <linux/rwsem.h> +#include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/types.h> @@ -27,6 +30,23 @@ struct crypto_larval { struct crypto_alg *adult; struct completion completion; u32 mask; + bool test_started; +}; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + void (*destroy)(struct crypto_alg *alg); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; + unsigned int algsize; }; enum { @@ -45,6 +65,30 @@ extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; extern struct blocking_notifier_head crypto_chain; +int alg_test(const char *driver, const char *alg, u32 type, u32 mask); + +#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) +static inline bool crypto_boot_test_finished(void) +{ + return true; +} +static inline void set_crypto_boot_test_finished(void) +{ +} +#else +DECLARE_STATIC_KEY_FALSE(__crypto_boot_test_finished); +static inline bool crypto_boot_test_finished(void) +{ + return static_branch_likely(&__crypto_boot_test_finished); +} +static inline void set_crypto_boot_test_finished(void) +{ + static_branch_enable(&__crypto_boot_test_finished); +} +#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || + * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) + */ + #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); void __exit crypto_exit_proc(void); @@ -69,17 +113,21 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); -void crypto_larval_kill(struct crypto_alg *alg); +void crypto_schedule_test(struct crypto_larval *larval); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); +struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, + u32 mask, gfp_t gfp); struct crypto_tfm 
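With both templates registered above, the public "hmac" name serves either hash interface: hmac_create() inspects the requested type bits and instantiates the ahash flavour or falls through to __hmac_create_shash(), while "hmac-shash" (referenced via crypto_inst_setname() earlier) pins the synchronous variant. From a user's point of view nothing changes; both of the following resolve through the same template front end:

/* Either allocation instantiates "hmac"; the core picks the flavour. */
struct crypto_ahash *a = crypto_alloc_ahash("hmac(sha256)", 0, 0);
struct crypto_shash *s = crypto_alloc_shash("hmac(sha256)", 0, 0);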
*__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm_node(struct crypto_alg *alg, const struct crypto_type *frontend, int node); +void *crypto_clone_tfm(const struct crypto_type *frontend, + struct crypto_tfm *otfm); static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) @@ -114,10 +162,12 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) return alg; } +void crypto_destroy_alg(struct crypto_alg *alg); + static inline void crypto_alg_put(struct crypto_alg *alg) { - if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) - alg->cra_destroy(alg); + if (refcount_dec_and_test(&alg->cra_refcnt)) + crypto_destroy_alg(alg); } static inline int crypto_tmpl_get(struct crypto_template *tmpl) @@ -156,5 +206,15 @@ static inline void crypto_yield(u32 flags) cond_resched(); } +static inline int crypto_is_test_larval(struct crypto_larval *larval) +{ + return larval->alg.cra_driver_name[0]; +} + +static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm) +{ + return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW); +} + #endif /* _CRYPTO_INTERNAL_H */ diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index e8a4165a1874..7c880cf34c52 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -2,7 +2,7 @@ * Non-physical true random number generator based on timing jitter -- * Linux Kernel Crypto API specific code * - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -37,42 +37,41 @@ * DAMAGE. 
*/ +#include <crypto/hash.h> +#include <crypto/sha3.h> +#include <linux/fips.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/fips.h> #include <linux/time.h> #include <crypto/internal/rng.h> #include "jitterentropy.h" +#define JENT_CONDITIONING_HASH "sha3-256" + /*************************************************************************** * Helper function ***************************************************************************/ -void *jent_zalloc(unsigned int len) +void *jent_kvzalloc(unsigned int len) { - return kzalloc(len, GFP_KERNEL); + return kvzalloc(len, GFP_KERNEL); } -void jent_zfree(void *ptr) +void jent_kvzfree(void *ptr, unsigned int len) { - kfree_sensitive(ptr); + kvfree_sensitive(ptr, len); } -int jent_fips_enabled(void) -{ - return fips_enabled; -} - -void jent_panic(char *s) +void *jent_zalloc(unsigned int len) { - panic("%s", s); + return kzalloc(len, GFP_KERNEL); } -void jent_memcpy(void *dest, const void *src, unsigned int n) +void jent_zfree(void *ptr) { - memcpy(dest, src, n); + kfree_sensitive(ptr); } /* @@ -99,6 +98,94 @@ void jent_get_nstime(__u64 *out) tmp = ktime_get_ns(); *out = tmp; + jent_raw_hires_entropy_store(tmp); +} + +int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, + unsigned int addtl_len, __u64 hash_loop_cnt, + unsigned int stuck) +{ + struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; + SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm); + u8 intermediary[SHA3_256_DIGEST_SIZE]; + __u64 j = 0; + int ret; + + desc->tfm = hash_state_desc->tfm; + + if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) { + pr_warn_ratelimited("Unexpected digest size\n"); + return -EINVAL; + } + kmsan_unpoison_memory(intermediary, sizeof(intermediary)); + + /* + * This loop fills a buffer which is injected into the entropy pool. + * The main reason for this loop is to execute something over which we + * can perform a timing measurement. The injection of the resulting + * data into the pool is performed to ensure the result is used and + * the compiler cannot optimize the loop away in case the result is not + * used at all. Yet that data is considered "additional information" + * considering the terminology from SP800-90A without any entropy. + * + * Note, it does not matter which or how much data you inject, we are + * interested in one Keccack1600 compression operation performed with + * the crypto_shash_final. + */ + for (j = 0; j < hash_loop_cnt; j++) { + ret = crypto_shash_init(desc) ?: + crypto_shash_update(desc, intermediary, + sizeof(intermediary)) ?: + crypto_shash_finup(desc, addtl, addtl_len, intermediary); + if (ret) + goto err; + } + + /* + * Inject the data from the previous loop into the pool. This data is + * not considered to contain any entropy, but it stirs the pool a bit. + */ + ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary)); + if (ret) + goto err; + + /* + * Insert the time stamp into the hash context representing the pool. + * + * If the time stamp is stuck, do not finally insert the value into the + * entropy pool. Although this operation should not do any harm even + * when the time stamp has no entropy, SP800-90B requires that any + * conditioning operation to have an identical amount of input data + * according to section 3.1.5. 
+ */ + if (stuck) { + time = 0; + } + + ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64)); + +err: + shash_desc_zero(desc); + memzero_explicit(intermediary, sizeof(intermediary)); + + return ret; +} + +int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len) +{ + struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; + u8 jent_block[SHA3_256_DIGEST_SIZE]; + /* Obtain data from entropy pool and re-initialize it */ + int ret = crypto_shash_final(hash_state_desc, jent_block) ?: + crypto_shash_init(hash_state_desc) ?: + crypto_shash_update(hash_state_desc, jent_block, + sizeof(jent_block)); + + if (!ret && dst_len) + memcpy(dst, jent_block, dst_len); + + memzero_explicit(jent_block, sizeof(jent_block)); + return ret; } /*************************************************************************** @@ -108,33 +195,76 @@ void jent_get_nstime(__u64 *out) struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; - unsigned int reset_cnt; + struct crypto_shash *tfm; + struct shash_desc *sdesc; }; -static int jent_kcapi_init(struct crypto_tfm *tfm) +static void jent_kcapi_cleanup(struct crypto_tfm *tfm) { struct jitterentropy *rng = crypto_tfm_ctx(tfm); - int ret = 0; - rng->entropy_collector = jent_entropy_collector_alloc(1, 0); - if (!rng->entropy_collector) - ret = -ENOMEM; + spin_lock(&rng->jent_lock); - spin_lock_init(&rng->jent_lock); - return ret; -} + if (rng->sdesc) { + shash_desc_zero(rng->sdesc); + kfree(rng->sdesc); + } + rng->sdesc = NULL; -static void jent_kcapi_cleanup(struct crypto_tfm *tfm) -{ - struct jitterentropy *rng = crypto_tfm_ctx(tfm); + if (rng->tfm) + crypto_free_shash(rng->tfm); + rng->tfm = NULL; - spin_lock(&rng->jent_lock); if (rng->entropy_collector) jent_entropy_collector_free(rng->entropy_collector); rng->entropy_collector = NULL; spin_unlock(&rng->jent_lock); } +static int jent_kcapi_init(struct crypto_tfm *tfm) +{ + struct jitterentropy *rng = crypto_tfm_ctx(tfm); + struct crypto_shash *hash; + struct shash_desc *sdesc; + int size, ret = 0; + + spin_lock_init(&rng->jent_lock); + + /* Use SHA3-256 as conditioner */ + hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); + if (IS_ERR(hash)) { + pr_err("Cannot allocate conditioning digest\n"); + return PTR_ERR(hash); + } + rng->tfm = hash; + + size = sizeof(struct shash_desc) + crypto_shash_descsize(hash); + sdesc = kmalloc(size, GFP_KERNEL); + if (!sdesc) { + ret = -ENOMEM; + goto err; + } + + sdesc->tfm = hash; + crypto_shash_init(sdesc); + rng->sdesc = sdesc; + + rng->entropy_collector = + jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, + sdesc); + if (!rng->entropy_collector) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&rng->jent_lock); + return 0; + +err: + jent_kcapi_cleanup(tfm); + return ret; +} + static int jent_kcapi_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *rdata, unsigned int dlen) @@ -144,32 +274,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm, spin_lock(&rng->jent_lock); - /* Return a permanent error in case we had too many resets in a row. */ - if (rng->reset_cnt > (1<<10)) { - ret = -EFAULT; - goto out; - } - ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); - /* Reset RNG in case of health failures */ - if (ret < -1) { - pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n", - (ret == -2) ? 
"Repetition Count Test" : - "Adaptive Proportion Test"); - - rng->reset_cnt++; - + if (ret == -3) { + /* Handle permanent health test error */ + /* + * If the kernel was booted with fips=1, it implies that + * the entire kernel acts as a FIPS 140 module. In this case + * an SP800-90B permanent health test error is treated as + * a FIPS module error. + */ + if (fips_enabled) + panic("Jitter RNG permanent health test failure\n"); + + pr_err("Jitter RNG permanent health test failure\n"); + ret = -EFAULT; + } else if (ret == -2) { + /* Handle intermittent health test error */ + pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n"); ret = -EAGAIN; - } else { - rng->reset_cnt = 0; - - /* Convert the Jitter RNG error into a usable error code */ - if (ret == -1) - ret = -EINVAL; + } else if (ret == -1) { + /* Handle other errors */ + ret = -EINVAL; } -out: spin_unlock(&rng->jent_lock); return ret; @@ -193,16 +321,34 @@ static struct rng_alg jent_alg = { .cra_module = THIS_MODULE, .cra_init = jent_kcapi_init, .cra_exit = jent_kcapi_cleanup, - } }; static int __init jent_mod_init(void) { + SHASH_DESC_ON_STACK(desc, tfm); + struct crypto_shash *tfm; int ret = 0; - ret = jent_entropy_init(); + jent_testing_init(); + + tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); + if (IS_ERR(tfm)) { + jent_testing_exit(); + return PTR_ERR(tfm); + } + + desc->tfm = tfm; + crypto_shash_init(desc); + ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL); + shash_desc_zero(desc); + crypto_free_shash(tfm); if (ret) { + /* Handle permanent health test error */ + if (fips_enabled) + panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); + + jent_testing_exit(); pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); return -EFAULT; } @@ -211,6 +357,7 @@ static int __init jent_mod_init(void) static void __exit jent_mod_exit(void) { + jent_testing_exit(); crypto_unregister_rng(&jent_alg); } diff --git a/crypto/jitterentropy-testing.c b/crypto/jitterentropy-testing.c new file mode 100644 index 000000000000..21c9d7c3269a --- /dev/null +++ b/crypto/jitterentropy-testing.c @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Test interface for Jitter RNG. 
+ * + * Copyright (C) 2023, Stephan Mueller <smueller@chronox.de> + */ + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/uaccess.h> + +#include "jitterentropy.h" + +#define JENT_TEST_RINGBUFFER_SIZE (1<<10) +#define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1) + +struct jent_testing { + u64 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE]; + u32 rb_reader; + atomic_t rb_writer; + atomic_t jent_testing_enabled; + spinlock_t lock; + wait_queue_head_t read_wait; +}; + +static struct dentry *jent_raw_debugfs_root = NULL; + +/*************************** Generic Data Handling ****************************/ + +/* + * boot variable: + * 0 ==> No boot test, gathering of runtime data allowed + * 1 ==> Boot test enabled and ready for collecting data, gathering runtime + * data is disabled + * 2 ==> Boot test completed and disabled, gathering of runtime data is + * disabled + */ + +static void jent_testing_reset(struct jent_testing *data) +{ + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + data->rb_reader = 0; + atomic_set(&data->rb_writer, 0); + spin_unlock_irqrestore(&data->lock, flags); +} + +static void jent_testing_data_init(struct jent_testing *data, u32 boot) +{ + /* + * The boot time testing implies we have a running test. If the + * caller wants to clear it, he has to unset the boot_test flag + * at runtime via sysfs to enable regular runtime testing + */ + if (boot) + return; + + jent_testing_reset(data); + atomic_set(&data->jent_testing_enabled, 1); + pr_warn("Enabling data collection\n"); +} + +static void jent_testing_fini(struct jent_testing *data, u32 boot) +{ + /* If we have boot data, we do not reset yet to allow data to be read */ + if (boot) + return; + + atomic_set(&data->jent_testing_enabled, 0); + jent_testing_reset(data); + pr_warn("Disabling data collection\n"); +} + +static bool jent_testing_store(struct jent_testing *data, u64 value, + u32 *boot) +{ + unsigned long flags; + + if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1)) + return false; + + spin_lock_irqsave(&data->lock, flags); + + /* + * Disable entropy testing for boot time testing after ring buffer + * is filled. + */ + if (*boot) { + if (((u32)atomic_read(&data->rb_writer)) > + JENT_TEST_RINGBUFFER_SIZE) { + *boot = 2; + pr_warn_once("One time data collection test disabled\n"); + spin_unlock_irqrestore(&data->lock, flags); + return false; + } + + if (atomic_read(&data->rb_writer) == 1) + pr_warn("One time data collection test enabled\n"); + } + + data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) & + JENT_TEST_RINGBUFFER_MASK] = value; + atomic_inc(&data->rb_writer); + + spin_unlock_irqrestore(&data->lock, flags); + + if (wq_has_sleeper(&data->read_wait)) + wake_up_interruptible(&data->read_wait); + + return true; +} + +static bool jent_testing_have_data(struct jent_testing *data) +{ + return ((((u32)atomic_read(&data->rb_writer)) & + JENT_TEST_RINGBUFFER_MASK) != + (data->rb_reader & JENT_TEST_RINGBUFFER_MASK)); +} + +static int jent_testing_reader(struct jent_testing *data, u32 *boot, + u8 *outbuf, u32 outbuflen) +{ + unsigned long flags; + int collected_data = 0; + + jent_testing_data_init(data, *boot); + + while (outbuflen) { + u32 writer = (u32)atomic_read(&data->rb_writer); + + spin_lock_irqsave(&data->lock, flags); + + /* We have no data or reached the writer. */ + if (!writer || (writer == data->rb_reader)) { + + spin_unlock_irqrestore(&data->lock, flags); + + /* + * Now we gathered all boot data, enable regular data + * collection. 
+ */ + if (*boot) { + *boot = 0; + goto out; + } + + wait_event_interruptible(data->read_wait, + jent_testing_have_data(data)); + if (signal_pending(current)) { + collected_data = -ERESTARTSYS; + goto out; + } + + continue; + } + + /* We copy out word-wise */ + if (outbuflen < sizeof(u64)) { + spin_unlock_irqrestore(&data->lock, flags); + goto out; + } + + memcpy(outbuf, &data->jent_testing_rb[data->rb_reader], + sizeof(u64)); + data->rb_reader++; + + spin_unlock_irqrestore(&data->lock, flags); + + outbuf += sizeof(u64); + outbuflen -= sizeof(u64); + collected_data += sizeof(u64); + } + +out: + jent_testing_fini(data, *boot); + return collected_data; +} + +static int jent_testing_extract_user(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos, + int (*reader)(u8 *outbuf, u32 outbuflen)) +{ + u8 *tmp, *tmp_aligned; + int ret = 0, large_request = (nbytes > 256); + + if (!nbytes) + return 0; + + /* + * The intention of this interface is for collecting at least + * 1000 samples due to the SP800-90B requirements. However, due to + * memory and performance constraints, it is not desirable to allocate + * 8000 bytes of memory. Instead, we allocate space for only 125 + * samples, which will allow the user to collect all 1000 samples using + * 8 calls to this interface. + */ + tmp = kmalloc(125 * sizeof(u64) + sizeof(u64), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp_aligned = PTR_ALIGN(tmp, sizeof(u64)); + + while (nbytes) { + int i; + + if (large_request && need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + schedule(); + } + + i = min_t(int, nbytes, 125 * sizeof(u64)); + i = reader(tmp_aligned, i); + if (i <= 0) { + if (i < 0) + ret = i; + break; + } + if (copy_to_user(buf, tmp_aligned, i)) { + ret = -EFAULT; + break; + } + + nbytes -= i; + buf += i; + ret += i; + } + + kfree_sensitive(tmp); + + if (ret > 0) + *ppos += ret; + + return ret; +} + +/************** Raw High-Resolution Timer Entropy Data Handling **************/ + +static u32 boot_raw_hires_test = 0; +module_param(boot_raw_hires_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_hires_test, + "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events"); + +static struct jent_testing jent_raw_hires = { + .rb_reader = 0, + .rb_writer = ATOMIC_INIT(0), + .lock = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait) +}; + +int jent_raw_hires_entropy_store(__u64 value) +{ + return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test); +} +EXPORT_SYMBOL(jent_raw_hires_entropy_store); + +static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test, + outbuf, outbuflen); +} + +static ssize_t jent_raw_hires_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return jent_testing_extract_user(file, to, count, ppos, + jent_raw_hires_entropy_reader); +} + +static const struct file_operations jent_raw_hires_fops = { + .owner = THIS_MODULE, + .read = jent_raw_hires_read, +}; + +/******************************* Initialization *******************************/ + +void jent_testing_init(void) +{ + jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + + debugfs_create_file_unsafe("jent_raw_hires", 0400, + jent_raw_debugfs_root, NULL, + &jent_raw_hires_fops); +} +EXPORT_SYMBOL(jent_testing_init); + +void jent_testing_exit(void) +{ + 
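The raw-entropy interface is sized so that the SP800-90B minimum of 1000 samples arrives in eight reads of 125 u64 values each. A hypothetical user-space collector is sketched below; the debugfs directory name derives from KBUILD_MODNAME, so the path is an assumption, and error handling is elided.

/* Collect 1000 raw 64-bit timer samples in 8 x 1000-byte reads. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int collect(uint64_t samples[1000])
{
	/* Path assumed; check debugfs for the actual module directory. */
	int fd = open("/sys/kernel/debug/jitterentropy_testing/jent_raw_hires",
		      O_RDONLY);
	for (int i = 0; i < 8; i++)
		read(fd, (char *)samples + i * 1000, 1000); /* 125 * 8 bytes */
	close(fd);
	return 0;
}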
debugfs_remove_recursive(jent_raw_debugfs_root); +} +EXPORT_SYMBOL(jent_testing_exit); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index a11b3208760f..3f93cdc9a7af 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -2,7 +2,7 @@ * Non-physical true random number generator based on timing jitter -- * Jitter RNG standalone code. * - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 * * Design * ====== @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.2.0 provided at https://www.chronox.de/jent.html + * version 3.4.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ @@ -57,25 +57,28 @@ typedef unsigned long long __u64; typedef long long __s64; typedef unsigned int __u32; +typedef unsigned char u8; #define NULL ((void *) 0) /* The entropy pool */ struct rand_data { + /* SHA3-256 is used as conditioner */ +#define DATA_SIZE_BITS 256 /* all data values that are vital to maintain the security * of the RNG are marked as SENSITIVE. A user must not * access that information while the RNG executes its loops to * calculate the next random value. */ - __u64 data; /* SENSITIVE Actual random number */ - __u64 old_data; /* SENSITIVE Previous random number */ - __u64 prev_time; /* SENSITIVE Previous time stamp */ -#define DATA_SIZE_BITS ((sizeof(__u64)) * 8) - __u64 last_delta; /* SENSITIVE stuck test */ - __s64 last_delta2; /* SENSITIVE stuck test */ - unsigned int osr; /* Oversample rate */ -#define JENT_MEMORY_BLOCKS 64 -#define JENT_MEMORY_BLOCKSIZE 32 + void *hash_state; /* SENSITIVE hash state entropy pool */ + __u64 prev_time; /* SENSITIVE Previous time stamp */ + __u64 last_delta; /* SENSITIVE stuck test */ + __s64 last_delta2; /* SENSITIVE stuck test */ + + unsigned int flags; /* Flags used to initialize */ + unsigned int osr; /* Oversample rate */ #define JENT_MEMORY_ACCESSLOOPS 128 -#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE) +#define JENT_MEMORY_SIZE \ + (CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \ + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE) unsigned char *mem; /* Memory access location with size of * memblocks * memblocksize */ unsigned int memlocation; /* Pointer to byte in *mem */ @@ -85,10 +88,11 @@ struct rand_data { * bit generation */ /* Repetition Count Test */ - int rct_count; /* Number of stuck values */ + unsigned int rct_count; /* Number of stuck values */ - /* Adaptive Proportion Test for a significance level of 2^-30 */ -#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ + /* Adaptive Proportion Test cutoff values */ + unsigned int apt_cutoff; /* Intermittent health test failure */ + unsigned int apt_cutoff_permanent; /* Permanent health test failure */ #define JENT_APT_WINDOW_SIZE 512 /* Data window size */ /* LSB of time stamp to process */ #define JENT_APT_LSB 16 @@ -96,9 +100,9 @@ struct rand_data { unsigned int apt_observations; /* Number of collected observations */ unsigned int apt_count; /* APT counter */ unsigned int apt_base; /* APT base reference */ - unsigned int apt_base_set:1; /* APT base reference set? */ + unsigned int health_failure; /* Record health failure */ - unsigned int health_failure:1; /* Permanent health failure */ + unsigned int apt_base_set:1; /* APT base reference set? */ }; /* Flags that can be used to initialize the RNG */ @@ -115,8 +119,35 @@ struct rand_data { * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. 
*/ #define JENT_EHEALTH 9 /* Health test failed during initialization */ -#define JENT_ERCT 10 /* RCT failed during initialization */ +#define JENT_ERCT 10 /* RCT failed during initialization */ +#define JENT_EHASH 11 /* Hash self test failed */ +#define JENT_EMEM 12 /* Can't allocate memory for initialization */ + +#define JENT_RCT_FAILURE 1 /* Failure in RCT health test. */ +#define JENT_APT_FAILURE 2 /* Failure in APT health test. */ +#define JENT_PERMANENT_FAILURE_SHIFT 16 +#define JENT_PERMANENT_FAILURE(x) (x << JENT_PERMANENT_FAILURE_SHIFT) +#define JENT_RCT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE) +#define JENT_APT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_APT_FAILURE) + +/* + * The output n bits can receive more than n bits of min entropy, of course, + * but the fixed output of the conditioning function can only asymptotically + * approach the output size bits of min entropy, not attain that bound. Random + * maps will tend to have output collisions, which reduces the creditable + * output entropy (that is what SP 800-90B Section 3.1.5.1.2 attempts to bound). + * + * The value "64" is justified in Appendix A.4 of the current 90C draft, + * and aligns with NIST's in "epsilon" definition in this document, which is + * that a string can be considered "full entropy" if you can bound the min + * entropy in each bit of output to at least 1-epsilon, where epsilon is + * required to be <= 2^(-32). + */ +#define JENT_ENTROPY_SAFETY_FACTOR 64 +#include <linux/array_size.h> +#include <linux/fips.h> +#include <linux/minmax.h> #include "jitterentropy.h" /*************************************************************************** @@ -125,7 +156,48 @@ struct rand_data { * This test complies with SP800-90B section 4.4.2. ***************************************************************************/ -/** +/* + * See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B + * APT. + * https://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf + * In the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)). + * (The original formula wasn't correct because the first symbol must + * necessarily have been observed, so there is no chance of observing 0 of these + * symbols.) + * + * For the alpha < 2^-53, R cannot be used as it uses a float data type without + * arbitrary precision. A SageMath script is used to calculate those cutoff + * values. + * + * For any value above 14, this yields the maximal allowable value of 512 + * (by FIPS 140-2 IG 7.19 Resolution # 16, we cannot choose a cutoff value that + * renders the test unable to fail). + */ +static const unsigned int jent_apt_cutoff_lookup[15] = { + 325, 422, 459, 477, 488, 494, 499, 502, + 505, 507, 508, 509, 510, 511, 512 }; +static const unsigned int jent_apt_cutoff_permanent_lookup[15] = { + 355, 447, 479, 494, 502, 507, 510, 512, + 512, 512, 512, 512, 512, 512, 512 }; + +static void jent_apt_init(struct rand_data *ec, unsigned int osr) +{ + /* + * Establish the apt_cutoff based on the presumed entropy rate of + * 1/osr. 
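The failure state is now a bitmask rather than a boolean: intermittent RCT/APT failures occupy the low bits and their permanent counterparts the same bits shifted up by JENT_PERMANENT_FAILURE_SHIFT, so a single word records all four conditions. A sketch of consuming it, matching the -2/-3 mapping seen in jitterentropy-kcapi.c above:

/* Sketch of decoding the jent_health_failure() bitmask. */
unsigned int hf = jent_health_failure(ec);

if (hf & (JENT_RCT_FAILURE_PERMANENT | JENT_APT_FAILURE_PERMANENT))
	return -3;	/* unrecoverable; tear the instance down */
if (hf & (JENT_RCT_FAILURE | JENT_APT_FAILURE))
	return -2;	/* intermittent; reset and retry */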
+ */ + if (osr >= ARRAY_SIZE(jent_apt_cutoff_lookup)) { + ec->apt_cutoff = jent_apt_cutoff_lookup[ + ARRAY_SIZE(jent_apt_cutoff_lookup) - 1]; + ec->apt_cutoff_permanent = jent_apt_cutoff_permanent_lookup[ + ARRAY_SIZE(jent_apt_cutoff_permanent_lookup) - 1]; + } else { + ec->apt_cutoff = jent_apt_cutoff_lookup[osr - 1]; + ec->apt_cutoff_permanent = + jent_apt_cutoff_permanent_lookup[osr - 1]; + } +} +/* * Reset the APT counter * * @ec [in] Reference to entropy collector @@ -138,7 +210,7 @@ static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked) ec->apt_observations = 0; } -/** +/* * Insert a new entropy event into APT * * @ec [in] Reference to entropy collector @@ -156,8 +228,11 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) if (delta_masked == ec->apt_base) { ec->apt_count++; - if (ec->apt_count >= JENT_APT_CUTOFF) - ec->health_failure = 1; + /* Note, ec->apt_count starts with one. */ + if (ec->apt_count >= ec->apt_cutoff_permanent) + ec->health_failure |= JENT_APT_FAILURE_PERMANENT; + else if (ec->apt_count >= ec->apt_cutoff) + ec->health_failure |= JENT_APT_FAILURE; } ec->apt_observations++; @@ -182,7 +257,7 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) * the end. The caller of the Jitter RNG is informed with an error code. ***************************************************************************/ -/** +/* * Repetition Count Test as defined in SP800-90B section 4.4.1 * * @ec [in] Reference to entropy collector @@ -190,55 +265,38 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) */ static void jent_rct_insert(struct rand_data *ec, int stuck) { - /* - * If we have a count less than zero, a previous RCT round identified - * a failure. We will not overwrite it. - */ - if (ec->rct_count < 0) - return; - if (stuck) { ec->rct_count++; /* * The cutoff value is based on the following consideration: - * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8. - * In addition, we require an entropy value H of 1/OSR as this + * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. + * In addition, we require an entropy value H of 1/osr as this * is the minimum entropy required to provide full entropy. - * Note, we collect 64 * OSR deltas for inserting them into - * the entropy pool which should then have (close to) 64 bits - * of entropy. + * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr + * deltas for inserting them into the entropy pool which should + * then have (close to) DATA_SIZE_BITS bits of entropy in the + * conditioned output. * * Note, ec->rct_count (which equals to value B in the pseudo * code of SP800-90B section 4.4.1) starts with zero. Hence * we need to subtract one from the cutoff value as calculated - * following SP800-90B. + * following SP800-90B. Thus C = ceil(-log_2(alpha)/H) = 30*osr + * or 60*osr. */ - if ((unsigned int)ec->rct_count >= (31 * ec->osr)) { + if ((unsigned int)ec->rct_count >= (60 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure |= JENT_RCT_FAILURE_PERMANENT; + } else if ((unsigned int)ec->rct_count >= (30 * ec->osr)) { ec->rct_count = -1; - ec->health_failure = 1; + ec->health_failure |= JENT_RCT_FAILURE; } } else { + /* Reset RCT */ ec->rct_count = 0; } } -/** - * Is there an RCT health test failure? 
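Worked numbers for the two cutoff families above, with H = 1/osr in both cases:

/*
 * RCT: C = ceil(-log2(alpha) / H)
 *      osr = 1: intermittent at 30 stuck deltas (alpha = 2^-30),
 *               permanent at 60 (alpha = 2^-60)
 *      osr = 3: 90 and 180 respectively
 *
 * APT: table lookup per osr
 *      osr = 1:   325 intermittent / 355 permanent
 *      osr >= 15: both capped at 512, the window size, so the test
 *                 remains able to fail (FIPS 140-2 IG 7.19 #16)
 */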
- * - * @ec [in] Reference to entropy collector - * - * @return - * 0 No health test failure - * 1 Permanent health test failure - */ -static int jent_rct_failure(struct rand_data *ec) -{ - if (ec->rct_count < 0) - return 1; - return 0; -} - static inline __u64 jent_delta(__u64 prev, __u64 next) { #define JENT_UINT64_MAX (__u64)(~((__u64) 0)) @@ -246,7 +304,7 @@ static inline __u64 jent_delta(__u64 prev, __u64 next) (JENT_UINT64_MAX - prev + 1 + next); } -/** +/* * Stuck test by checking the: * 1st derivative of the jitter measurement (time delta) * 2nd derivative of the jitter measurement (delta of time deltas) @@ -265,7 +323,6 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) { __u64 delta2 = jent_delta(ec->last_delta, current_delta); __u64 delta3 = jent_delta(ec->last_delta2, delta2); - unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK; ec->last_delta = current_delta; ec->last_delta2 = delta2; @@ -274,7 +331,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) * Insert the result of the comparison of two back-to-back time * deltas. */ - jent_apt_insert(ec, delta_masked); + jent_apt_insert(ec, current_delta); if (!current_delta || !delta2 || !delta3) { /* RCT with a stuck bit */ @@ -288,19 +345,22 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) return 0; } -/** +/* * Report any health test failures * * @ec [in] Reference to entropy collector * - * @return - * 0 No health test failure - * 1 Permanent health test failure + * @return a bitmask indicating which tests failed + * 0 No health test failure + * 1 RCT failure + * 2 APT failure + * 1<<JENT_PERMANENT_FAILURE_SHIFT RCT permanent failure + * 2<<JENT_PERMANENT_FAILURE_SHIFT APT permanent failure */ -static int jent_health_failure(struct rand_data *ec) +static unsigned int jent_health_failure(struct rand_data *ec) { /* Test is only enabled in FIPS mode */ - if (!jent_fips_enabled()) + if (!fips_enabled) return 0; return ec->health_failure; @@ -310,20 +370,18 @@ static int jent_health_failure(struct rand_data *ec) * Noise sources ***************************************************************************/ -/** +/* * Update of the loop count used for the next round of * an entropy collection. * * Input: - * @ec entropy collector struct -- may be NULL * @bits is the number of low bits of the timer to consider * @min is the number of bits we shift the timer value to the right at * the end to make sure we have a guaranteed minimum value * * @return Newly calculated loop counter */ -static __u64 jent_loop_shuffle(struct rand_data *ec, - unsigned int bits, unsigned int min) +static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min) { __u64 time = 0; __u64 shuffle = 0; @@ -331,12 +389,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, unsigned int mask = (1<<bits) - 1; jent_get_nstime(&time); - /* - * Mix the current state of the random number into the shuffle - * calculation to balance that shuffle a bit more. - */ - if (ec) - time ^= ec->data; + /* * We fold the time value as much as possible to ensure that as many * bits of the time stamp are included as possible. @@ -353,89 +406,40 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, return (shuffle + (1<<min)); } -/** +/* * CPU Jitter noise source -- this is the noise source based on the CPU * execution time jitter * * This function injects the individual bits of the time value into the - * entropy pool using an LFSR. + * entropy pool using a hash. 
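The stuck test that feeds the RCT is compact enough to restate on its own: a time delta counts as stuck if the delta itself, its first derivative or its second derivative is zero. A self-contained sketch of the same logic (userspace types, mirroring jent_delta() and jent_stuck() above):

#include <stdint.h>

/* Wraparound-safe difference between two 64-bit time stamps. */
static uint64_t delta(uint64_t prev, uint64_t next)
{
	return (prev < next) ? (next - prev)
			     : (UINT64_MAX - prev + 1 + next);
}

struct stuck_state {
	uint64_t last_delta;	/* previous first derivative */
	uint64_t last_delta2;	/* previous second derivative */
};

/* Returns 1 if the measurement repeats, or advances/accelerates uniformly. */
static int stuck(struct stuck_state *s, uint64_t current_delta)
{
	uint64_t delta2 = delta(s->last_delta, current_delta);
	uint64_t delta3 = delta(s->last_delta2, delta2);

	s->last_delta = current_delta;
	s->last_delta2 = delta2;

	return !current_delta || !delta2 || !delta3;
}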
* - * The code is deliberately inefficient with respect to the bit shifting - * and shall stay that way. This function is the root cause why the code - * shall be compiled without optimization. This function not only acts as - * folding operation, but this function's execution is used to measure - * the CPU execution time jitter. Any change to the loop in this function - * implies that careful retesting must be done. - * - * @ec [in] entropy collector struct - * @time [in] time stamp to be injected - * @loop_cnt [in] if a value not equal to 0 is set, use the given value as - * number of loops to perform the folding - * @stuck [in] Is the time stamp identified as stuck? + * ec [in] entropy collector + * time [in] time stamp to be injected + * stuck [in] Is the time stamp identified as stuck? * * Output: - * updated ec->data - * - * @return Number of loops the folding operation is performed + * updated hash context in the entropy collector or error code */ -static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt, - int stuck) +static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck) { - unsigned int i; - __u64 j = 0; - __u64 new = 0; -#define MAX_FOLD_LOOP_BIT 4 -#define MIN_FOLD_LOOP_BIT 0 - __u64 fold_loop_cnt = - jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT); - - /* - * testing purposes -- allow test app to set the counter, not - * needed during runtime - */ - if (loop_cnt) - fold_loop_cnt = loop_cnt; - for (j = 0; j < fold_loop_cnt; j++) { - new = ec->data; - for (i = 1; (DATA_SIZE_BITS) >= i; i++) { - __u64 tmp = time << (DATA_SIZE_BITS - i); - - tmp = tmp >> (DATA_SIZE_BITS - 1); - - /* - * Fibonacci LSFR with polynomial of - * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is - * primitive according to - * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf - * (the shift values are the polynomial values minus one - * due to counting bits from 0 to 63). As the current - * position is always the LSB, the polynomial only needs - * to shift data in from the left without wrap. - */ - tmp ^= ((new >> 63) & 1); - tmp ^= ((new >> 60) & 1); - tmp ^= ((new >> 55) & 1); - tmp ^= ((new >> 30) & 1); - tmp ^= ((new >> 27) & 1); - tmp ^= ((new >> 22) & 1); - new <<= 1; - new ^= tmp; - } - } - - /* - * If the time stamp is stuck, do not finally insert the value into - * the entropy pool. Although this operation should not do any harm - * even when the time stamp has no entropy, SP800-90B requires that - * any conditioning operation (SP800-90B considers the LFSR to be a - * conditioning operation) to have an identical amount of input - * data according to section 3.1.5. 
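For readers tracking what was removed: the retired conditioner shifted each bit of the time stamp into a 64-bit state against the taps of the primitive polynomial named above. A compact restatement (illustrative; the in-kernel original was deliberately written in a slower, unrolled style because its runtime itself was part of the noise source):

#include <stdint.h>

/*
 * One folding pass of the removed Fibonacci LFSR: polynomial
 * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1, i.e. taps at bits
 * 63, 60, 55, 30, 27 and 22 (exponents minus one, counting from 0).
 */
static uint64_t lfsr_fold(uint64_t state, uint64_t time)
{
	unsigned int i;

	for (i = 0; i < 64; i++) {
		uint64_t in = (time >> i) & 1;	/* next time-stamp bit */

		in ^= (state >> 63) & 1;	/* feedback taps */
		in ^= (state >> 60) & 1;
		in ^= (state >> 55) & 1;
		in ^= (state >> 30) & 1;
		in ^= (state >> 27) & 1;
		in ^= (state >> 22) & 1;

		state = (state << 1) ^ in;	/* shift in from the right */
	}
	return state;
}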
- */ - if (!stuck) - ec->data = new; +#define SHA3_HASH_LOOP (1<<3) + struct { + int rct_count; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + } addtl = { + ec->rct_count, + ec->apt_observations, + ec->apt_count, + ec->apt_base + }; + + return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl), + SHA3_HASH_LOOP, stuck); } -/** +/* * Memory Access noise source -- this is a noise source based on variations in * memory access times * @@ -466,7 +470,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) #define MAX_ACC_LOOP_BIT 7 #define MIN_ACC_LOOP_BIT 0 __u64 acc_loop_cnt = - jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); + jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); if (NULL == ec || NULL == ec->mem) return; @@ -500,7 +504,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) /*************************************************************************** * Start of entropy processing logic ***************************************************************************/ -/** +/* * This is the heart of the entropy generation: calculate time deltas and * use the CPU jitter in the time deltas. The jitter is injected into the * entropy pool. @@ -513,7 +517,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) * * @return result of stuck test */ -static int jent_measure_jitter(struct rand_data *ec) +static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta) { __u64 time = 0; __u64 current_delta = 0; @@ -534,39 +538,47 @@ static int jent_measure_jitter(struct rand_data *ec) stuck = jent_stuck(ec, current_delta); /* Now call the next noise sources which also injects the data */ - jent_lfsr_time(ec, current_delta, 0, stuck); + if (jent_condition_data(ec, current_delta, stuck)) + stuck = 1; + + /* return the raw entropy value */ + if (ret_current_delta) + *ret_current_delta = current_delta; return stuck; } -/** +/* * Generator of one 64 bit random number - * Function fills rand_data->data + * Function fills rand_data->hash_state * * @ec [in] Reference to entropy collector */ static void jent_gen_entropy(struct rand_data *ec) { - unsigned int k = 0; + unsigned int k = 0, safety_factor = 0; + + if (fips_enabled) + safety_factor = JENT_ENTROPY_SAFETY_FACTOR; /* priming of the ->prev_time value */ - jent_measure_jitter(ec); + jent_measure_jitter(ec, NULL); - while (1) { + while (!jent_health_failure(ec)) { /* If a stuck measurement is received, repeat measurement */ - if (jent_measure_jitter(ec)) + if (jent_measure_jitter(ec, NULL)) continue; /* * We multiply the loop value with ->osr to obtain the * oversampling rate requested by the caller */ - if (++k >= (DATA_SIZE_BITS * ec->osr)) + if (++k >= ((DATA_SIZE_BITS + safety_factor) * ec->osr)) break; } } -/** +/* * Entry function: Obtain entropy for the caller. 
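Two things are worth pulling out of the hunk above. First, jent_condition_data() now folds the live health-test counters into the hash as additional data, so the conditioned output is bound to the tested state. Second, with the FIPS safety factor the collection loop consumes (DATA_SIZE_BITS + JENT_ENTROPY_SAFETY_FACTOR) * osr non-stuck deltas per output block. The arithmetic, assuming DATA_SIZE_BITS = 64 as elsewhere in this driver:

#include <stdio.h>

#define DATA_SIZE_BITS			64
#define JENT_ENTROPY_SAFETY_FACTOR	64

int main(void)
{
	unsigned int osr;

	/* Non-stuck measurements consumed per 64-bit block in FIPS mode. */
	for (osr = 1; osr <= 3; osr++)
		printf("osr %u: %u deltas\n", osr,
		       (DATA_SIZE_BITS + JENT_ENTROPY_SAFETY_FACTOR) * osr);
	/* Prints: osr 1: 128, osr 2: 256, osr 3: 384 */
	return 0;
}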
* * This function invokes the entropy gathering logic as often to generate @@ -585,9 +597,9 @@ static void jent_gen_entropy(struct rand_data *ec) * @return 0 when request is fulfilled or an error * * The following error codes can occur: - * -1 entropy_collector is NULL - * -2 RCT failed - * -3 APT test failed + * -1 entropy_collector is NULL or the generation failed + * -2 Intermittent health failure + * -3 Permanent health failure */ int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len) @@ -598,50 +610,38 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, return -1; while (len > 0) { - unsigned int tocopy; + unsigned int tocopy, health_test_result; jent_gen_entropy(ec); - if (jent_health_failure(ec)) { - int ret; - - if (jent_rct_failure(ec)) - ret = -2; - else - ret = -3; - + health_test_result = jent_health_failure(ec); + if (health_test_result > JENT_PERMANENT_FAILURE_SHIFT) { /* - * Re-initialize the noise source - * - * If the health test fails, the Jitter RNG remains - * in failure state and will return a health failure - * during next invocation. + * At this point, the Jitter RNG instance is considered + * as a failed instance. There is no rerun of the + * startup test any more, because the caller + * is assumed to not further use this instance. */ - if (jent_entropy_init()) - return ret; - - /* Set APT to initial state */ - jent_apt_reset(ec, 0); - ec->apt_base_set = 0; - - /* Set RCT to initial state */ - ec->rct_count = 0; - - /* Re-enable Jitter RNG */ - ec->health_failure = 0; - + return -3; + } else if (health_test_result) { /* - * Return the health test failure status to the - * caller as the generated value is not appropriate. + * Perform startup health tests and return permanent + * error if it fails. 
*/ - return ret; + if (jent_entropy_init(0, 0, NULL, ec)) { + /* Mark the permanent error */ + ec->health_failure &= + JENT_RCT_FAILURE_PERMANENT | + JENT_APT_FAILURE_PERMANENT; + return -3; + } + + return -2; } - if ((DATA_SIZE_BITS / 8) < len) - tocopy = (DATA_SIZE_BITS / 8); - else - tocopy = len; - jent_memcpy(p, &ec->data, tocopy); + tocopy = min(DATA_SIZE_BITS / 8, len); + if (jent_read_random_block(ec->hash_state, p, tocopy)) + return -1; len -= tocopy; p += tocopy; @@ -655,7 +655,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, ***************************************************************************/ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, - unsigned int flags) + unsigned int flags, + void *hash_state) { struct rand_data *entropy_collector; @@ -667,20 +668,28 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, /* Allocate memory for adding variations based on memory * access */ - entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE); + entropy_collector->mem = jent_kvzalloc(JENT_MEMORY_SIZE); if (!entropy_collector->mem) { jent_zfree(entropy_collector); return NULL; } - entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE; - entropy_collector->memblocks = JENT_MEMORY_BLOCKS; + entropy_collector->memblocksize = + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE; + entropy_collector->memblocks = + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS; entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS; } /* verify and set the oversampling rate */ if (osr == 0) - osr = 1; /* minimum sampling rate is 1 */ + osr = 1; /* H_submitter = 1 / osr */ entropy_collector->osr = osr; + entropy_collector->flags = flags; + + entropy_collector->hash_state = hash_state; + + /* Initialize the APT */ + jent_apt_init(entropy_collector, osr); /* fill the data pad with non-zero values */ jent_gen_entropy(entropy_collector); @@ -690,24 +699,39 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, void jent_entropy_collector_free(struct rand_data *entropy_collector) { - jent_zfree(entropy_collector->mem); + jent_kvzfree(entropy_collector->mem, JENT_MEMORY_SIZE); entropy_collector->mem = NULL; jent_zfree(entropy_collector); } -int jent_entropy_init(void) +int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, + struct rand_data *p_ec) { - int i; - __u64 delta_sum = 0; - __u64 old_delta = 0; - unsigned int nonstuck = 0; - int time_backwards = 0; - int count_mod = 0; - int count_stuck = 0; - struct rand_data ec = { 0 }; - - /* Required for RCT */ - ec.osr = 1; + /* + * If caller provides an allocated ec, reuse it which implies that the + * health test entropy data is used to further still the available + * entropy pool. + */ + struct rand_data *ec = p_ec; + int i, time_backwards = 0, ret = 0, ec_free = 0; + unsigned int health_test_result; + + if (!ec) { + ec = jent_entropy_collector_alloc(osr, flags, hash_state); + if (!ec) + return JENT_EMEM; + ec_free = 1; + } else { + /* Reset the APT */ + jent_apt_reset(ec, 0); + /* Ensure that a new APT base is obtained */ + ec->apt_base_set = 0; + /* Reset the RCT */ + ec->rct_count = 0; + /* Reset intermittent, leave permanent health test result */ + ec->health_failure &= (~JENT_RCT_FAILURE); + ec->health_failure &= (~JENT_APT_FAILURE); + } /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. 
These @@ -736,31 +760,28 @@ int jent_entropy_init(void) #define TESTLOOPCOUNT 1024 #define CLEARCACHE 100 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { - __u64 time = 0; - __u64 time2 = 0; - __u64 delta = 0; - unsigned int lowdelta = 0; - int stuck; + __u64 start_time = 0, end_time = 0, delta = 0; /* Invoke core entropy collection logic */ - jent_get_nstime(&time); - ec.prev_time = time; - jent_lfsr_time(&ec, time, 0, 0); - jent_get_nstime(&time2); + jent_measure_jitter(ec, &delta); + end_time = ec->prev_time; + start_time = ec->prev_time - delta; /* test whether timer works */ - if (!time || !time2) - return JENT_ENOTIME; - delta = jent_delta(time, time2); + if (!start_time || !end_time) { + ret = JENT_ENOTIME; + goto out; + } + /* * test whether timer is fine grained enough to provide * delta even when called shortly after each other -- this * implies that we also have a high resolution timer */ - if (!delta) - return JENT_ECOARSETIME; - - stuck = jent_stuck(&ec, delta); + if (!delta || (end_time == start_time)) { + ret = JENT_ECOARSETIME; + goto out; + } /* * up to here we did not modify any variable that will be @@ -772,51 +793,9 @@ int jent_entropy_init(void) if (i < CLEARCACHE) continue; - if (stuck) - count_stuck++; - else { - nonstuck++; - - /* - * Ensure that the APT succeeded. - * - * With the check below that count_stuck must be less - * than 10% of the overall generated raw entropy values - * it is guaranteed that the APT is invoked at - * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times. - */ - if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { - jent_apt_reset(&ec, - delta & JENT_APT_WORD_MASK); - if (jent_health_failure(&ec)) - return JENT_EHEALTH; - } - } - - /* Validate RCT */ - if (jent_rct_failure(&ec)) - return JENT_ERCT; - /* test whether we have an increasing timer */ - if (!(time2 > time)) + if (!(end_time > start_time)) time_backwards++; - - /* use 32 bit value to ensure compilation on 32 bit arches */ - lowdelta = time2 - time; - if (!(lowdelta % 100)) - count_mod++; - - /* - * ensure that we have a varying delta timer which is necessary - * for the calculation of entropy -- perform this check - * only after the first loop is executed as we need to prime - * the old_data value - */ - if (delta > old_delta) - delta_sum += (delta - old_delta); - else - delta_sum += (old_delta - delta); - old_delta = delta; } /* @@ -826,31 +805,22 @@ int jent_entropy_init(void) * should not fail. The value of 3 should cover the NTP case being * performed during our test run. */ - if (time_backwards > 3) - return JENT_ENOMONOTONIC; - - /* - * Variations of deltas of time must on average be larger - * than 1 to ensure the entropy estimation - * implied with 1 is preserved - */ - if ((delta_sum) <= 1) - return JENT_EVARVAR; + if (time_backwards > 3) { + ret = JENT_ENOMONOTONIC; + goto out; + } - /* - * Ensure that we have variations in the time stamp below 10 for at - * least 10% of all checks -- on some platforms, the counter increments - * in multiples of 100, but not always - */ - if ((TESTLOOPCOUNT/10 * 9) < count_mod) - return JENT_ECOARSETIME; + /* Did we encounter a health test failure? */ + health_test_result = jent_health_failure(ec); + if (health_test_result) { + ret = (health_test_result & JENT_RCT_FAILURE) ? JENT_ERCT : + JENT_EHEALTH; + goto out; + } - /* - * If we have more than 90% stuck results, then this Jitter RNG is - * likely to not work well. 
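Taken together, jent_read_entropy() now reports three failure classes: -1 for a missing collector or a generation error, -2 for an intermittent health failure (the startup tests are re-run internally before -2 is returned), and -3 for a permanent failure after which the instance must be discarded. A sketch of a caller; the retry-once policy is an assumption for illustration, not something the code prescribes:

#include <linux/errno.h>
#include "jitterentropy.h"

static int jent_read_checked(struct rand_data *ec, unsigned char *buf,
			     unsigned int len)
{
	int ret = jent_read_entropy(ec, buf, len);

	/* Intermittent failure: startup tests already re-ran; retry once. */
	if (ret == -2)
		ret = jent_read_entropy(ec, buf, len);

	switch (ret) {
	case 0:
		return 0;
	case -3:
		return -EPERM;	/* permanent failure: discard instance */
	case -1:
		return -EINVAL;	/* NULL collector or generation error */
	default:
		return -EAGAIN;	/* still failing intermittently */
	}
}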
- */ - if ((TESTLOOPCOUNT/10 * 9) < count_stuck) - return JENT_ESTUCK; +out: + if (ec_free) + jent_entropy_collector_free(ec); - return 0; + return ret; } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index c83fff32d130..4c5dbf2a8d8f 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -1,17 +1,32 @@ // SPDX-License-Identifier: GPL-2.0-or-later +extern void *jent_kvzalloc(unsigned int len); +extern void jent_kvzfree(void *ptr, unsigned int len); extern void *jent_zalloc(unsigned int len); extern void jent_zfree(void *ptr); -extern int jent_fips_enabled(void); -extern void jent_panic(char *s); -extern void jent_memcpy(void *dest, const void *src, unsigned int n); extern void jent_get_nstime(__u64 *out); +extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, + unsigned int addtl_len, __u64 hash_loop_cnt, + unsigned int stuck); +int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len); struct rand_data; -extern int jent_entropy_init(void); +extern int jent_entropy_init(unsigned int osr, unsigned int flags, + void *hash_state, struct rand_data *p_ec); extern int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len); extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr, - unsigned int flags); + unsigned int flags, + void *hash_state); extern void jent_entropy_collector_free(struct rand_data *entropy_collector); + +#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE +int jent_raw_hires_entropy_store(__u64 value); +void jent_testing_init(void); +void jent_testing_exit(void); +#else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */ +static inline int jent_raw_hires_entropy_store(__u64 value) { return 0; } +static inline void jent_testing_init(void) { } +static inline void jent_testing_exit(void) { } +#endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */ diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c new file mode 100644 index 000000000000..b7a6bf9da773 --- /dev/null +++ b/crypto/kdf_sp800108.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * SP800-108 Key-derivation function + * + * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de> + */ + +#include <linux/fips.h> +#include <linux/module.h> +#include <crypto/kdf_sp800108.h> +#include <crypto/internal/kdf_selftest.h> + +/* + * SP800-108 CTR KDF implementation + */ +int crypto_kdf108_ctr_generate(struct crypto_shash *kmd, + const struct kvec *info, unsigned int info_nvec, + u8 *dst, unsigned int dlen) +{ + SHASH_DESC_ON_STACK(desc, kmd); + __be32 counter = cpu_to_be32(1); + const unsigned int h = crypto_shash_digestsize(kmd), dlen_orig = dlen; + unsigned int i; + int err = 0; + u8 *dst_orig = dst; + + desc->tfm = kmd; + + while (dlen) { + err = crypto_shash_init(desc); + if (err) + goto out; + + err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32)); + if (err) + goto out; + + for (i = 0; i < info_nvec; i++) { + err = crypto_shash_update(desc, info[i].iov_base, + info[i].iov_len); + if (err) + goto out; + } + + if (dlen < h) { + u8 tmpbuffer[HASH_MAX_DIGESTSIZE]; + + err = crypto_shash_final(desc, tmpbuffer); + if (err) + goto out; + memcpy(dst, tmpbuffer, dlen); + memzero_explicit(tmpbuffer, h); + goto out; + } + + err = crypto_shash_final(desc, dst); + if (err) + goto out; + + dlen -= h; + dst += h; + counter = cpu_to_be32(be32_to_cpu(counter) + 1); + } + +out: + if (err) + memzero_explicit(dst_orig, dlen_orig); + shash_desc_zero(desc); + return err; +} +EXPORT_SYMBOL(crypto_kdf108_ctr_generate); + +/* 
+ * The seeding of the KDF + */ +int crypto_kdf108_setkey(struct crypto_shash *kmd, + const u8 *key, size_t keylen, + const u8 *ikm, size_t ikmlen) +{ + unsigned int ds = crypto_shash_digestsize(kmd); + + /* SP800-108 does not support IKM */ + if (ikm || ikmlen) + return -EINVAL; + + /* Check according to SP800-108 section 7.2 */ + if (ds > keylen) + return -EINVAL; + + /* Set the key for the MAC used for the KDF. */ + return crypto_shash_setkey(kmd, key, keylen); +} +EXPORT_SYMBOL(crypto_kdf108_setkey); + +/* + * Test vector obtained from + * http://csrc.nist.gov/groups/STM/cavp/documents/KBKDF800-108/CounterMode.zip + */ +static const struct kdf_testvec kdf_ctr_hmac_sha256_tv_template[] = { + { + .key = "\xdd\x1d\x91\xb7\xd9\x0b\x2b\xd3" + "\x13\x85\x33\xce\x92\xb2\x72\xfb" + "\xf8\xa3\x69\x31\x6a\xef\xe2\x42" + "\xe6\x59\xcc\x0a\xe2\x38\xaf\xe0", + .keylen = 32, + .ikm = NULL, + .ikmlen = 0, + .info = { + .iov_base = "\x01\x32\x2b\x96\xb3\x0a\xcd\x19" + "\x79\x79\x44\x4e\x46\x8e\x1c\x5c" + "\x68\x59\xbf\x1b\x1c\xf9\x51\xb7" + "\xe7\x25\x30\x3e\x23\x7e\x46\xb8" + "\x64\xa1\x45\xfa\xb2\x5e\x51\x7b" + "\x08\xf8\x68\x3d\x03\x15\xbb\x29" + "\x11\xd8\x0a\x0e\x8a\xba\x17\xf3" + "\xb4\x13\xfa\xac", + .iov_len = 60 + }, + .expected = "\x10\x62\x13\x42\xbf\xb0\xfd\x40" + "\x04\x6c\x0e\x29\xf2\xcf\xdb\xf0", + .expectedlen = 16 + } +}; + +static int __init crypto_kdf108_init(void) +{ + int ret; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) + return 0; + + ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)", + crypto_kdf108_setkey, crypto_kdf108_ctr_generate); + if (ret) { + if (fips_enabled) + panic("alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n", + ret); + + WARN(1, + "alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n", + ret); + } else if (fips_enabled) { + pr_info("alg: self-tests for CTR-KDF (hmac(sha256)) passed\n"); + } + + return ret; +} + +static void __exit crypto_kdf108_exit(void) { } + +module_init(crypto_kdf108_init); +module_exit(crypto_kdf108_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); +MODULE_DESCRIPTION("Key Derivation Function conformant to SP800-108"); diff --git a/crypto/keywrap.c b/crypto/keywrap.c deleted file mode 100644 index 054d9a216fc9..000000000000 --- a/crypto/keywrap.c +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Key Wrapping: RFC3394 / NIST SP800-38F - * - * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, and the entire permission notice in its entirety, - * including the disclaimer of warranties. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * ALTERNATIVELY, this product may be distributed under the terms of - * the GNU General Public License, in which case the provisions of the GPL2 - * are required INSTEAD OF the above restrictions. (This clause is - * necessary due to a potential bad interaction between the GPL and - * the restrictions contained in a BSD-style copyright.) 
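Stepping back to the new KDF: crypto_kdf108_ctr_generate() produces K(i) = HMAC(key, be32(i) || info) for i = 1, 2, ... and concatenates the blocks, truncating the final one to the requested length. A sketch of driving it with "hmac(sha256)" as in the self-test; the label/context string and helper name are illustrative, and the keying key must be at least the digest size (32 bytes here) per the crypto_kdf108_setkey() check:

#include <crypto/hash.h>
#include <crypto/kdf_sp800108.h>
#include <linux/err.h>
#include <linux/uio.h>

/* Derive a 32-byte subkey from @kdk (>= 32 bytes) and a fixed info string. */
static int kdf108_derive_example(const u8 *kdk, size_t kdklen, u8 *out)
{
	struct crypto_shash *kmd;
	struct kvec info = {
		.iov_base = "example-label\0example-context",
		.iov_len  = 29,	/* label, NUL separator, context */
	};
	int err;

	kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(kmd))
		return PTR_ERR(kmd);

	err = crypto_kdf108_setkey(kmd, kdk, kdklen, NULL, 0);
	if (!err)
		err = crypto_kdf108_ctr_generate(kmd, &info, 1, out, 32);

	crypto_free_shash(kmd);
	return err;
}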
- * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF - * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - */ - -/* - * Note for using key wrapping: - * - * * The result of the encryption operation is the ciphertext starting - * with the 2nd semiblock. The first semiblock is provided as the IV. - * The IV used to start the encryption operation is the default IV. - * - * * The input for the decryption is the first semiblock handed in as an - * IV. The ciphertext is the data starting with the 2nd semiblock. The - * return code of the decryption operation will be EBADMSG in case an - * integrity error occurs. - * - * To obtain the full result of an encryption as expected by SP800-38F, the - * caller must allocate a buffer of plaintext + 8 bytes: - * - * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm); - * u8 data[datalen]; - * u8 *iv = data; - * u8 *pt = data + crypto_skcipher_ivsize(tfm); - * <ensure that pt contains the plaintext of size ptlen> - * sg_init_one(&sg, pt, ptlen); - * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv); - * - * ==> After encryption, data now contains full KW result as per SP800-38F. - * - * In case of decryption, ciphertext now already has the expected length - * and must be segmented appropriately: - * - * unsigned int datalen = CTLEN; - * u8 data[datalen]; - * <ensure that data contains full ciphertext> - * u8 *iv = data; - * u8 *ct = data + crypto_skcipher_ivsize(tfm); - * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm); - * sg_init_one(&sg, ct, ctlen); - * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv); - * - * ==> After decryption (which hopefully does not return EBADMSG), the ct - * pointer now points to the plaintext of size ctlen. - * - * Note 2: KWP is not implemented as this would defy in-place operation. - * If somebody wants to wrap non-aligned data, he should simply pad - * the input with zeros to fill it up to the 8 byte boundary. - */ - -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/scatterlist.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/skcipher.h> - -struct crypto_kw_block { -#define SEMIBSIZE 8 - __be64 A; - __be64 R; -}; - -/* - * Fast forward the SGL to the "end" length minus SEMIBSIZE. - * The start in the SGL defined by the fast-forward is returned with - * the walk variable - */ -static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, - struct scatterlist *sg, - unsigned int end) -{ - unsigned int skip = 0; - - /* The caller should only operate on full SEMIBLOCKs. 
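The buffer discipline described in the removed usage note deserves a concrete form, since it is the part callers most often got wrong. A sketch following that note (synchronous request, "kw(aes)" instantiation of the deleted template; the helper name is illustrative):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Wrap @ptlen bytes of @pt. On success the returned buffer of
 * ptlen + 8 bytes holds the full SP800-38F result: the final first
 * semiblock ("IV") followed by the remaining ciphertext.
 */
static u8 *kw_wrap(struct crypto_skcipher *tfm, struct skcipher_request *req,
		   const u8 *pt, unsigned int ptlen)
{
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);	/* 8 for kw */
	u8 *buf = kmalloc(ptlen + ivsize, GFP_KERNEL);
	struct scatterlist sg;

	if (!buf)
		return NULL;
	memcpy(buf + ivsize, pt, ptlen);	/* plaintext after IV slot */

	sg_init_one(&sg, buf + ivsize, ptlen);
	skcipher_request_set_crypt(req, &sg, &sg, ptlen, buf);
	if (crypto_skcipher_encrypt(req)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}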
*/ - BUG_ON(end < SEMIBSIZE); - - skip = end - SEMIBSIZE; - while (sg) { - if (sg->length > skip) { - scatterwalk_start(walk, sg); - scatterwalk_advance(walk, skip); - break; - } - - skip -= sg->length; - sg = sg_next(sg); - } -} - -static int crypto_kw_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - struct crypto_kw_block block; - struct scatterlist *src, *dst; - u64 t = 6 * ((req->cryptlen) >> 3); - unsigned int i; - int ret = 0; - - /* - * Require at least 2 semiblocks (note, the 3rd semiblock that is - * required by SP800-38F is the IV. - */ - if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) - return -EINVAL; - - /* Place the IV into block A */ - memcpy(&block.A, req->iv, SEMIBSIZE); - - /* - * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, src points to req->src and dst to req->dst. For any - * subsequent round, the code operates on req->dst only. - */ - src = req->src; - dst = req->dst; - - for (i = 0; i < 6; i++) { - struct scatter_walk src_walk, dst_walk; - unsigned int nbytes = req->cryptlen; - - while (nbytes) { - /* move pointer by nbytes in the SGL */ - crypto_kw_scatterlist_ff(&src_walk, src, nbytes); - /* get the source block */ - scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, - false); - - /* perform KW operation: modify IV with counter */ - block.A ^= cpu_to_be64(t); - t--; - /* perform KW operation: decrypt block */ - crypto_cipher_decrypt_one(cipher, (u8 *)&block, - (u8 *)&block); - - /* move pointer by nbytes in the SGL */ - crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes); - /* Copy block->R into place */ - scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, - true); - - nbytes -= SEMIBSIZE; - } - - /* we now start to operate on the dst SGL only */ - src = req->dst; - dst = req->dst; - } - - /* Perform authentication check */ - if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL)) - ret = -EBADMSG; - - memzero_explicit(&block, sizeof(struct crypto_kw_block)); - - return ret; -} - -static int crypto_kw_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - struct crypto_kw_block block; - struct scatterlist *src, *dst; - u64 t = 1; - unsigned int i; - - /* - * Require at least 2 semiblocks (note, the 3rd semiblock that is - * required by SP800-38F is the IV that occupies the first semiblock. - * This means that the dst memory must be one semiblock larger than src. - * Also ensure that the given data is aligned to semiblock. - */ - if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) - return -EINVAL; - - /* - * Place the predefined IV into block A -- for encrypt, the caller - * does not need to provide an IV, but he needs to fetch the final IV. - */ - block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL); - - /* - * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, src points to req->src and dst to req->dst. For any - * subsequent round, the code operates on req->dst only. 
- */ - src = req->src; - dst = req->dst; - - for (i = 0; i < 6; i++) { - struct scatter_walk src_walk, dst_walk; - unsigned int nbytes = req->cryptlen; - - scatterwalk_start(&src_walk, src); - scatterwalk_start(&dst_walk, dst); - - while (nbytes) { - /* get the source block */ - scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, - false); - - /* perform KW operation: encrypt block */ - crypto_cipher_encrypt_one(cipher, (u8 *)&block, - (u8 *)&block); - /* perform KW operation: modify IV with counter */ - block.A ^= cpu_to_be64(t); - t++; - - /* Copy block->R into place */ - scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, - true); - - nbytes -= SEMIBSIZE; - } - - /* we now start to operate on the dst SGL only */ - src = req->dst; - dst = req->dst; - } - - /* establish the IV for the caller to pick up */ - memcpy(req->iv, &block.A, SEMIBSIZE); - - memzero_explicit(&block, sizeof(struct crypto_kw_block)); - - return 0; -} - -static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct skcipher_instance *inst; - struct crypto_alg *alg; - int err; - - inst = skcipher_alloc_instance_simple(tmpl, tb); - if (IS_ERR(inst)) - return PTR_ERR(inst); - - alg = skcipher_ialg_simple(inst); - - err = -EINVAL; - /* Section 5.1 requirement for KW */ - if (alg->cra_blocksize != sizeof(struct crypto_kw_block)) - goto out_free_inst; - - inst->alg.base.cra_blocksize = SEMIBSIZE; - inst->alg.base.cra_alignmask = 0; - inst->alg.ivsize = SEMIBSIZE; - - inst->alg.encrypt = crypto_kw_encrypt; - inst->alg.decrypt = crypto_kw_decrypt; - - err = skcipher_register_instance(tmpl, inst); - if (err) { -out_free_inst: - inst->free(inst); - } - - return err; -} - -static struct crypto_template crypto_kw_tmpl = { - .name = "kw", - .create = crypto_kw_create, - .module = THIS_MODULE, -}; - -static int __init crypto_kw_init(void) -{ - return crypto_register_template(&crypto_kw_tmpl); -} - -static void __exit crypto_kw_exit(void) -{ - crypto_unregister_template(&crypto_kw_tmpl); -} - -subsys_initcall(crypto_kw_init); -module_exit(crypto_kw_exit); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); -MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)"); -MODULE_ALIAS_CRYPTO("kw"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/khazad.c b/crypto/khazad.c index f19339954c89..024264ee9cd1 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -19,11 +19,11 @@ * */ +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> -#include <linux/crypto.h> +#include <linux/unaligned.h> #include <linux/types.h> #define KHAZAD_KEY_SIZE 16 @@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; - /* key is supposed to be 32-bit aligned */ - K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); - K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); + K2 = get_unaligned_be64(&in_key[0]); + K1 = get_unaligned_be64(&in_key[8]); /* setup the encrypt key */ for (r = 0; r <= KHAZAD_ROUNDS; r++) { @@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, } static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], - u8 *ciphertext, const u8 *plaintext) + u8 *dst, const u8 *src) { - const __be64 *src = (const __be64 *)plaintext; - __be64 *dst = (__be64 
*)ciphertext; int r; u64 state; - state = be64_to_cpu(*src) ^ roundKey[0]; + state = get_unaligned_be64(src) ^ roundKey[0]; for (r = 1; r < KHAZAD_ROUNDS; r++) { state = T0[(int)(state >> 56) ] ^ @@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ roundKey[KHAZAD_ROUNDS]; - *dst = cpu_to_be64(state); + put_unaligned_be64(state, dst); } static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = KHAZAD_BLOCK_SIZE, .cra_ctxsize = sizeof (struct khazad_ctx), - .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = KHAZAD_KEY_SIZE, @@ -876,7 +871,7 @@ static void __exit khazad_mod_fini(void) } -subsys_initcall(khazad_mod_init); +module_init(khazad_mod_init); module_exit(khazad_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/kpp.c b/crypto/kpp.c index 313b2c699963..2e0cefe7a25f 100644 --- a/crypto/kpp.c +++ b/crypto/kpp.c @@ -5,23 +5,20 @@ * Copyright (c) 2016, Intel Corporation * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> */ + +#include <crypto/internal/kpp.h> +#include <linux/cryptouser.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/seq_file.h> -#include <linux/slab.h> #include <linux/string.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> -#include <linux/cryptouser.h> -#include <linux/compiler.h> #include <net/netlink.h> -#include <crypto/kpp.h> -#include <crypto/internal/kpp.h> + #include "internal.h" -#ifdef CONFIG_NET -static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg) +static int __maybe_unused crypto_kpp_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_kpp rkpp; @@ -31,12 +28,6 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp); } -#else -static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -68,17 +59,28 @@ static int crypto_kpp_init_tfm(struct crypto_tfm *tfm) return 0; } +static void crypto_kpp_free_instance(struct crypto_instance *inst) +{ + struct kpp_instance *kpp = kpp_instance(inst); + + kpp->free(kpp); +} + static const struct crypto_type crypto_kpp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_kpp_init_tfm, + .free = crypto_kpp_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_kpp_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_kpp_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_KPP, .tfmsize = offsetof(struct crypto_kpp, base), + .algsize = offsetof(struct kpp_alg, base), }; struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask) @@ -87,6 +89,21 @@ struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alloc_kpp); +int crypto_grab_kpp(struct crypto_kpp_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_kpp_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_kpp); + +int crypto_has_kpp(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_kpp_type, 
type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_kpp); + static void kpp_prepare_alg(struct kpp_alg *alg) { struct crypto_alg *base = &alg->base; @@ -111,5 +128,17 @@ void crypto_unregister_kpp(struct kpp_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_kpp); +int kpp_register_instance(struct crypto_template *tmpl, + struct kpp_instance *inst) +{ + if (WARN_ON(!inst->free)) + return -EINVAL; + + kpp_prepare_alg(&inst->alg); + + return crypto_register_instance(tmpl, kpp_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(kpp_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Key-agreement Protocol Primitives"); diff --git a/crypto/krb5/Kconfig b/crypto/krb5/Kconfig new file mode 100644 index 000000000000..4d0476e13f3c --- /dev/null +++ b/crypto/krb5/Kconfig @@ -0,0 +1,26 @@ +config CRYPTO_KRB5 + tristate "Kerberos 5 crypto" + select CRYPTO_MANAGER + select CRYPTO_KRB5ENC + select CRYPTO_AUTHENC + select CRYPTO_SKCIPHER + select CRYPTO_HASH_INFO + select CRYPTO_HMAC + select CRYPTO_CMAC + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_CBC + select CRYPTO_CTS + select CRYPTO_AES + select CRYPTO_CAMELLIA + help + Provide a library for provision of Kerberos-5-based crypto. This is + intended for network filesystems to use. + +config CRYPTO_KRB5_SELFTESTS + bool "Kerberos 5 crypto selftests" + depends on CRYPTO_KRB5 + help + Turn on some self-testing for the kerberos 5 crypto functions. These + will be performed on module load or boot, if compiled in. diff --git a/crypto/krb5/Makefile b/crypto/krb5/Makefile new file mode 100644 index 000000000000..d38890c0b247 --- /dev/null +++ b/crypto/krb5/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for asymmetric cryptographic keys +# + +krb5-y += \ + krb5_kdf.o \ + krb5_api.o \ + rfc3961_simplified.o \ + rfc3962_aes.o \ + rfc6803_camellia.o \ + rfc8009_aes2.o + +krb5-$(CONFIG_CRYPTO_KRB5_SELFTESTS) += \ + selftest.o \ + selftest_data.o + +obj-$(CONFIG_CRYPTO_KRB5) += krb5.o diff --git a/crypto/krb5/internal.h b/crypto/krb5/internal.h new file mode 100644 index 000000000000..a59084ffafe8 --- /dev/null +++ b/crypto/krb5/internal.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Kerberos5 crypto internals + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/scatterlist.h> +#include <crypto/krb5.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> + +/* + * Profile used for key derivation and encryption. + */ +struct krb5_crypto_profile { + /* Pseudo-random function */ + int (*calc_PRF)(const struct krb5_enctype *krb5, + const struct krb5_buffer *protocol_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp); + + /* Checksum key derivation */ + int (*calc_Kc)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *Kc, + gfp_t gfp); + + /* Encryption key derivation */ + int (*calc_Ke)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *Ke, + gfp_t gfp); + + /* Integrity key derivation */ + int (*calc_Ki)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *Ki, + gfp_t gfp); + + /* Derive the keys needed for an encryption AEAD object. 
*/ + int (*derive_encrypt_keys)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Directly load the keys needed for an encryption AEAD object. */ + int (*load_encrypt_keys)(const struct krb5_enctype *krb5, + const struct krb5_buffer *Ke, + const struct krb5_buffer *Ki, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Derive the key needed for a checksum hash object. */ + int (*derive_checksum_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Directly load the keys needed for a checksum hash object. */ + int (*load_checksum_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Encrypt data in-place, inserting confounder and checksum. */ + ssize_t (*encrypt)(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); + + /* Decrypt data in-place, removing confounder and checksum */ + int (*decrypt)(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + + /* Generate a MIC on part of a packet, inserting the checksum */ + ssize_t (*get_mic)(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); + + /* Verify the MIC on a piece of data, removing the checksum */ + int (*verify_mic)(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +}; + +/* + * Crypto size/alignment rounding convenience macros. + */ +#define crypto_roundup(X) ((unsigned int)round_up((X), CRYPTO_MINALIGN)) + +#define krb5_aead_size(TFM) \ + crypto_roundup(sizeof(struct aead_request) + crypto_aead_reqsize(TFM)) +#define krb5_aead_ivsize(TFM) \ + crypto_roundup(crypto_aead_ivsize(TFM)) +#define krb5_shash_size(TFM) \ + crypto_roundup(sizeof(struct shash_desc) + crypto_shash_descsize(TFM)) +#define krb5_digest_size(TFM) \ + crypto_roundup(crypto_shash_digestsize(TFM)) +#define round16(x) (((x) + 15) & ~15) + +/* + * Self-testing data. 
+ */ +struct krb5_prf_test { + u32 etype; + const char *name, *key, *octet, *prf; +}; + +struct krb5_key_test_one { + u32 use; + const char *key; +}; + +struct krb5_key_test { + u32 etype; + const char *name, *key; + struct krb5_key_test_one Kc, Ke, Ki; +}; + +struct krb5_enc_test { + u32 etype; + u32 usage; + const char *name, *plain, *conf, *K0, *Ke, *Ki, *ct; +}; + +struct krb5_mic_test { + u32 etype; + u32 usage; + const char *name, *plain, *K0, *Kc, *mic; +}; + +/* + * krb5_api.c + */ +struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *keys, + gfp_t gfp); +struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + gfp_t gfp); + +/* + * krb5_kdf.c + */ +int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); +int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); +int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); + +/* + * rfc3961_simplified.c + */ +extern const struct krb5_crypto_profile rfc3961_simplified_profile; + +int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg, + size_t offset, size_t len); +int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); +int authenc_load_encrypt_keys(const struct krb5_enctype *krb5, + const struct krb5_buffer *Ke, + const struct krb5_buffer *Ki, + struct krb5_buffer *setkey, + gfp_t gfp); +int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); +int rfc3961_load_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + struct krb5_buffer *setkey, + gfp_t gfp); +ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); +int krb5_aead_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len); +int rfc3961_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +/* + * rfc3962_aes.c + */ +extern const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96; +extern const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96; + +/* + * rfc6803_camellia.c + */ +extern const struct krb5_enctype krb5_camellia128_cts_cmac; +extern const struct krb5_enctype krb5_camellia256_cts_cmac; + +/* + * rfc8009_aes2.c + */ +extern const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128; +extern const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192; + +/* + * selftest.c + */ +#ifdef CONFIG_CRYPTO_KRB5_SELFTESTS +int krb5_selftest(void); +#else +static inline int krb5_selftest(void) { return 0; } +#endif + +/* + * selftest_data.c + */ +extern const struct krb5_prf_test krb5_prf_tests[]; 
+extern const struct krb5_key_test krb5_key_tests[]; +extern const struct krb5_enc_test krb5_enc_tests[]; +extern const struct krb5_mic_test krb5_mic_tests[]; diff --git a/crypto/krb5/krb5_api.c b/crypto/krb5/krb5_api.c new file mode 100644 index 000000000000..23026d4206c8 --- /dev/null +++ b/crypto/krb5/krb5_api.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos 5 crypto library. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include "internal.h" + +MODULE_DESCRIPTION("Kerberos 5 crypto"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + +static const struct krb5_enctype *const krb5_supported_enctypes[] = { + &krb5_aes128_cts_hmac_sha1_96, + &krb5_aes256_cts_hmac_sha1_96, + &krb5_aes128_cts_hmac_sha256_128, + &krb5_aes256_cts_hmac_sha384_192, + &krb5_camellia128_cts_cmac, + &krb5_camellia256_cts_cmac, +}; + +/** + * crypto_krb5_find_enctype - Find the handler for a Kerberos5 encryption type + * @enctype: The standard Kerberos encryption type number + * + * Look up a Kerberos encryption type by number. If successful, returns a + * pointer to the type tables; returns NULL otherwise. + */ +const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype) +{ + const struct krb5_enctype *krb5; + size_t i; + + for (i = 0; i < ARRAY_SIZE(krb5_supported_enctypes); i++) { + krb5 = krb5_supported_enctypes[i]; + if (krb5->etype == enctype) + return krb5; + } + + return NULL; +} +EXPORT_SYMBOL(crypto_krb5_find_enctype); + +/** + * crypto_krb5_how_much_buffer - Work out how much buffer is required for an amount of data + * @krb5: The encoding to use. + * @mode: The mode in which to operate (checksum/encrypt) + * @data_size: How much data we want to allow for + * @_offset: Where to place the offset into the buffer + * + * Calculate how much buffer space is required to wrap a given amount of data. + * This allows for a confounder, padding and checksum as appropriate. The + * amount of buffer required is returned and the offset into the buffer at + * which the data will start is placed in *_offset. + */ +size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t data_size, size_t *_offset) +{ + switch (mode) { + case KRB5_CHECKSUM_MODE: + *_offset = krb5->cksum_len; + return krb5->cksum_len + data_size; + + case KRB5_ENCRYPT_MODE: + *_offset = krb5->conf_len; + return krb5->conf_len + data_size + krb5->cksum_len; + + default: + WARN_ON(1); + *_offset = 0; + return 0; + } +} +EXPORT_SYMBOL(crypto_krb5_how_much_buffer); + +/** + * crypto_krb5_how_much_data - Work out how much data can fit in an amount of buffer + * @krb5: The encoding to use. + * @mode: The mode in which to operate (checksum/encrypt) + * @_buffer_size: How much buffer we want to allow for (may be reduced) + * @_offset: Where to place the offset into the buffer + * + * Calculate how much data can fit into a given amount of buffer. This + * allows for a confounder, padding and checksum as appropriate. The amount of + * data that will fit is returned, the amount of buffer required is shrunk to + * allow for alignment and the offset into the buffer at which the data will + * start is placed in *_offset.
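The two sizing helpers are pure offset arithmetic over the enctype's confounder and checksum lengths. A worked example of the KRB5_ENCRYPT_MODE case; the 16-byte confounder and 12-byte (96-bit) checksum are illustrative values matching aes128-cts-hmac-sha1-96:

/*
 * Mirror of crypto_krb5_how_much_buffer() for encrypt mode with
 * conf_len = 16 and cksum_len = 12:
 *
 *   offset = conf_len                       = 16
 *   buffer = conf_len + data + cksum_len    = 16 + 100 + 12 = 128
 *
 * so 100 bytes of payload need a 128-byte buffer with the data copied
 * in at offset 16; the confounder precedes it and the checksum trails.
 */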
+ */ +size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_buffer_size, size_t *_offset) +{ + size_t buffer_size = *_buffer_size, data_size; + + switch (mode) { + case KRB5_CHECKSUM_MODE: + if (WARN_ON(buffer_size < krb5->cksum_len + 1)) + goto bad; + *_offset = krb5->cksum_len; + return buffer_size - krb5->cksum_len; + + case KRB5_ENCRYPT_MODE: + if (WARN_ON(buffer_size < krb5->conf_len + 1 + krb5->cksum_len)) + goto bad; + data_size = buffer_size - krb5->cksum_len; + *_offset = krb5->conf_len; + return data_size - krb5->conf_len; + + default: + WARN_ON(1); + goto bad; + } + +bad: + *_offset = 0; + return 0; +} +EXPORT_SYMBOL(crypto_krb5_how_much_data); + +/** + * crypto_krb5_where_is_the_data - Find the data in a decrypted message + * @krb5: The encoding to use. + * @mode: Mode of operation + * @_offset: Offset of the secure blob in the buffer; updated to data offset. + * @_len: The length of the secure blob; updated to data length. + * + * Find the offset and size of the data in a secure message so that this + * information can be used in the metadata buffer which will get added to the + * digest by crypto_krb5_verify_mic(). + */ +void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_offset, size_t *_len) +{ + switch (mode) { + case KRB5_CHECKSUM_MODE: + *_offset += krb5->cksum_len; + *_len -= krb5->cksum_len; + return; + case KRB5_ENCRYPT_MODE: + *_offset += krb5->conf_len; + *_len -= krb5->conf_len + krb5->cksum_len; + return; + default: + WARN_ON_ONCE(1); + return; + } +} +EXPORT_SYMBOL(crypto_krb5_where_is_the_data); + +/* + * Prepare the encryption with derived key data. + */ +struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *keys, + gfp_t gfp) +{ + struct crypto_aead *ci = NULL; + int ret = -ENOMEM; + + ci = crypto_alloc_aead(krb5->encrypt_name, 0, 0); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + if (ret == -ENOENT) + ret = -ENOPKG; + goto err; + } + + ret = crypto_aead_setkey(ci, keys->data, keys->len); + if (ret < 0) { + pr_err("Couldn't set AEAD key %s: %d\n", krb5->encrypt_name, ret); + goto err_ci; + } + + ret = crypto_aead_setauthsize(ci, krb5->cksum_len); + if (ret < 0) { + pr_err("Couldn't set AEAD authsize %s: %d\n", krb5->encrypt_name, ret); + goto err_ci; + } + + return ci; +err_ci: + crypto_free_aead(ci); +err: + return ERR_PTR(ret); +} + +/** + * crypto_krb5_prepare_encryption - Prepare AEAD crypto object for encryption-mode + * @krb5: The encoding to use. + * @TK: The transport key to use. + * @usage: The usage constant for key derivation. + * @gfp: Allocation flags. + * + * Allocate a crypto object that does all the necessary crypto, key it and set + * its parameters and return the crypto handle to it. This can then be used to + * dispatch encrypt and decrypt operations. + */ +struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp) +{ + struct crypto_aead *ci = NULL; + struct krb5_buffer keys = {}; + int ret; + + ret = krb5->profile->derive_encrypt_keys(krb5, TK, usage, &keys, gfp); + if (ret < 0) + goto err; + + ci = krb5_prepare_encryption(krb5, &keys, gfp); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + goto err; + } + + kfree(keys.data); + return ci; +err: + kfree(keys.data); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(crypto_krb5_prepare_encryption); + +/* + * Prepare the checksum with derived key data. 
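Allocating a ready-keyed cipher is then a two-step lookup-and-derive. A sketch (the enctype number 17 is aes128-cts-hmac-sha1-96 per RFC 3962; the usage constant and helper name are illustrative, as real callers take both from the application protocol):

#include <crypto/krb5.h>
#include <linux/err.h>
#include <linux/slab.h>

static struct crypto_aead *session_cipher_example(const struct krb5_buffer *TK)
{
	const struct krb5_enctype *krb5;

	krb5 = crypto_krb5_find_enctype(17);	/* aes128-cts-hmac-sha1-96 */
	if (!krb5)
		return ERR_PTR(-ENOPKG);

	/* Derives Ke/Ki from TK for this usage and keys the AEAD. */
	return crypto_krb5_prepare_encryption(krb5, TK, 1024 /* usage */,
					      GFP_KERNEL);
}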
+ */ +struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + gfp_t gfp) +{ + struct crypto_shash *ci = NULL; + int ret = -ENOMEM; + + ci = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + if (ret == -ENOENT) + ret = -ENOPKG; + goto err; + } + + ret = crypto_shash_setkey(ci, Kc->data, Kc->len); + if (ret < 0) { + pr_err("Couldn't set shash key %s: %d\n", krb5->cksum_name, ret); + goto err_ci; + } + + return ci; +err_ci: + crypto_free_shash(ci); +err: + return ERR_PTR(ret); +} + +/** + * crypto_krb5_prepare_checksum - Prepare AEAD crypto object for checksum-mode + * @krb5: The encoding to use. + * @TK: The transport key to use. + * @usage: The usage constant for key derivation. + * @gfp: Allocation flags. + * + * Allocate a crypto object that does all the necessary crypto, key it and set + * its parameters and return the crypto handle to it. This can then be used to + * dispatch get_mic and verify_mic operations. + */ +struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp) +{ + struct crypto_shash *ci = NULL; + struct krb5_buffer keys = {}; + int ret; + + ret = krb5->profile->derive_checksum_key(krb5, TK, usage, &keys, gfp); + if (ret < 0) { + pr_err("get_Kc failed %d\n", ret); + goto err; + } + + ci = krb5_prepare_checksum(krb5, &keys, gfp); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + goto err; + } + + kfree(keys.data); + return ci; +err: + kfree(keys.data); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(crypto_krb5_prepare_checksum); + +/** + * crypto_krb5_encrypt - Apply Kerberos encryption and integrity. + * @krb5: The encoding to use. + * @aead: The keyed crypto object to use. + * @sg: Scatterlist defining the crypto buffer. + * @nr_sg: The number of elements in @sg. + * @sg_len: The size of the buffer. + * @data_offset: The offset of the data in the @sg buffer. + * @data_len: The length of the data. + * @preconfounded: True if the confounder is already inserted. + * + * Using the specified Kerberos encoding, insert a confounder and padding as + * needed, encrypt this and the data in place and insert an integrity checksum + * into the buffer. + * + * The buffer must include space for the confounder, the checksum and any + * padding required. The caller can preinsert the confounder into the buffer + * (for testing, for example). + * + * The resulting secured blob may be less than the size of the buffer. + * + * Returns the size of the secure blob if successful, -ENOMEM on an allocation + * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the confounder + * is too short or the data is misaligned. Other errors may also be returned + * from the crypto layer. + */ +ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + if (WARN_ON(data_offset > sg_len || + data_len > sg_len || + data_offset > sg_len - data_len)) + return -EMSGSIZE; + return krb5->profile->encrypt(krb5, aead, sg, nr_sg, sg_len, + data_offset, data_len, preconfounded); +} +EXPORT_SYMBOL(crypto_krb5_encrypt); + +/** + * crypto_krb5_decrypt - Validate and remove Kerberos encryption and integrity. + * @krb5: The encoding to use. + * @aead: The keyed crypto object to use. + * @sg: Scatterlist defining the crypto buffer. + * @nr_sg: The number of elements in @sg. 
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum and decrypt the secure region, stripping off the confounder.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data plus the trailing padding are stored.  The caller is responsible
+ * for working out how much padding there is and removing it.
+ *
+ * Returns 0 if successful, -ENOMEM on an allocation failure, -EPROTO if the
+ * data cannot be parsed, or -EBADMSG if the integrity checksum doesn't match.
+ * Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+			struct crypto_aead *aead,
+			struct scatterlist *sg, unsigned int nr_sg,
+			size_t *_offset, size_t *_len)
+{
+	return krb5->profile->decrypt(krb5, aead, sg, nr_sg, _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_decrypt);
+
+/**
+ * crypto_krb5_get_mic - Apply Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ *
+ * Using the specified Kerberos encoding, calculate and insert an integrity
+ * checksum into the buffer.
+ *
+ * The buffer must include space for the checksum at the front.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the gap for
+ * the checksum is too short.  Other errors may also be returned from the
+ * crypto layer.
+ */
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+			    struct crypto_shash *shash,
+			    const struct krb5_buffer *metadata,
+			    struct scatterlist *sg, unsigned int nr_sg,
+			    size_t sg_len,
+			    size_t data_offset, size_t data_len)
+{
+	if (WARN_ON(data_offset > sg_len ||
+		    data_len > sg_len ||
+		    data_offset > sg_len - data_len))
+		return -EMSGSIZE;
+	return krb5->profile->get_mic(krb5, shash, metadata, sg, nr_sg, sg_len,
+				      data_offset, data_len);
+}
+EXPORT_SYMBOL(crypto_krb5_get_mic);
+
+/**
+ * crypto_krb5_verify_mic - Validate and remove Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data is stored.
+ *
+ * Returns 0 if successful, -ENOMEM on an allocation failure, -EPROTO if the
+ * data cannot be parsed, or -EBADMSG if the checksum doesn't match.  Other
+ * errors may also be returned from the crypto layer.
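+ *
+ * For example, a caller that placed the MIC-protected blob at the start of
+ * its buffer would pass *_offset = 0 and *_len = blob size; on return,
+ * *_offset has been advanced past the checksum and *_len covers just the
+ * data.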
+ */ +int crypto_krb5_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + return krb5->profile->verify_mic(krb5, shash, metadata, sg, nr_sg, + _offset, _len); +} +EXPORT_SYMBOL(crypto_krb5_verify_mic); + +static int __init crypto_krb5_init(void) +{ + return krb5_selftest(); +} +module_init(crypto_krb5_init); + +static void __exit crypto_krb5_exit(void) +{ +} +module_exit(crypto_krb5_exit); diff --git a/crypto/krb5/krb5_kdf.c b/crypto/krb5/krb5_kdf.c new file mode 100644 index 000000000000..6699e5469d1b --- /dev/null +++ b/crypto/krb5/krb5_kdf.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos key derivation. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/export.h> +#include <linux/slab.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +/** + * crypto_krb5_calc_PRFplus - Calculate PRF+ [RFC4402] + * @krb5: The encryption type to use + * @K: The protocol key for the pseudo-random function + * @L: The length of the output + * @S: The input octet string + * @result: Result buffer, sized to krb5->prf_len + * @gfp: Allocation restrictions + * + * Calculate the kerberos pseudo-random function, PRF+() by the following + * method: + * + * PRF+(K, L, S) = truncate(L, T1 || T2 || .. || Tn) + * Tn = PRF(K, n || S) + * [rfc4402 sec 2] + */ +int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5, + const struct krb5_buffer *K, + unsigned int L, + const struct krb5_buffer *S, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct krb5_buffer T_series, Tn, n_S; + void *buffer; + int ret, n = 1; + + Tn.len = krb5->prf_len; + T_series.len = 0; + n_S.len = 4 + S->len; + + buffer = kzalloc(round16(L + Tn.len) + round16(n_S.len), gfp); + if (!buffer) + return -ENOMEM; + + T_series.data = buffer; + n_S.data = buffer + round16(L + Tn.len); + memcpy(n_S.data + 4, S->data, S->len); + + while (T_series.len < L) { + *(__be32 *)(n_S.data) = htonl(n); + Tn.data = T_series.data + Tn.len * (n - 1); + ret = krb5->profile->calc_PRF(krb5, K, &n_S, &Tn, gfp); + if (ret < 0) + goto err; + T_series.len += Tn.len; + n++; + } + + /* Truncate to L */ + memcpy(result->data, T_series.data, L); + ret = 0; + +err: + kfree_sensitive(buffer); + return ret; +} +EXPORT_SYMBOL(crypto_krb5_calc_PRFplus); + +/** + * krb5_derive_Kc - Derive key Kc and install into a hash + * @krb5: The encryption type to use + * @TK: The base key + * @usage: The key usage number + * @key: Prepped buffer to store the key into + * @gfp: Allocation restrictions + * + * Derive the Kerberos Kc checksumming key. The key is stored into the + * prepared buffer. 
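+ *
+ * The usage constant handed to the profile's calc_Kc is five octets: the
+ * usage number as a 32-bit big-endian value followed by
+ * KEY_USAGE_SEED_CHECKSUM (0x99 per the RFCs), so usage 1 encodes as
+ * 00 00 00 01 99.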
+ */ +int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_CHECKSUM; + + key->len = krb5->Kc_len; + return krb5->profile->calc_Kc(krb5, TK, &usage_constant, key, gfp); +} + +/** + * krb5_derive_Ke - Derive key Ke and install into an skcipher + * @krb5: The encryption type to use + * @TK: The base key + * @usage: The key usage number + * @key: Prepped buffer to store the key into + * @gfp: Allocation restrictions + * + * Derive the Kerberos Ke encryption key. The key is stored into the prepared + * buffer. + */ +int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_ENCRYPTION; + + key->len = krb5->Ke_len; + return krb5->profile->calc_Ke(krb5, TK, &usage_constant, key, gfp); +} + +/** + * krb5_derive_Ki - Derive key Ki and install into a hash + * @krb5: The encryption type to use + * @TK: The base key + * @usage: The key usage number + * @key: Prepped buffer to store the key into + * @gfp: Allocation restrictions + * + * Derive the Kerberos Ki integrity checksum key. The key is stored into the + * prepared buffer. + */ +int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_INTEGRITY; + + key->len = krb5->Ki_len; + return krb5->profile->calc_Ki(krb5, TK, &usage_constant, key, gfp); +} diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c new file mode 100644 index 000000000000..e49cbdec7c40 --- /dev/null +++ b/crypto/krb5/rfc3961_simplified.c @@ -0,0 +1,793 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* rfc3961 Kerberos 5 simplified crypto profile. + * + * Parts borrowed from net/sunrpc/auth_gss/. + */ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/lcm.h> +#include <linux/rtnetlink.h> +#include <crypto/authenc.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +/* Maximum blocksize for the supported crypto algorithms */ +#define KRB5_MAX_BLOCKSIZE (16) + +int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg, + size_t offset, size_t len) +{ + struct sg_mapping_iter miter; + size_t i, n; + int ret = 0; + + sg_miter_start(&miter, sg, sg_nents(sg), + SG_MITER_FROM_SG | SG_MITER_LOCAL); + sg_miter_skip(&miter, offset); + for (i = 0; i < len; i += n) { + sg_miter_next(&miter); + n = min(miter.length, len - i); + ret = crypto_shash_update(desc, miter.addr, n); + if (ret < 0) + break; + } + sg_miter_stop(&miter); + return ret; +} + +static int rfc3961_do_encrypt(struct crypto_sync_skcipher *tfm, void *iv, + const struct krb5_buffer *in, struct krb5_buffer *out) +{ + struct scatterlist sg[1]; + u8 local_iv[KRB5_MAX_BLOCKSIZE] __aligned(KRB5_MAX_BLOCKSIZE) = {0}; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); + int ret; + + if (WARN_ON(in->len != out->len)) + return -EINVAL; + if (out->len % crypto_sync_skcipher_blocksize(tfm) != 0) + return -EINVAL; + + if (crypto_sync_skcipher_ivsize(tfm) > KRB5_MAX_BLOCKSIZE) + return -EINVAL; + + if (iv) + memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm)); + + memcpy(out->data, in->data, out->len); + sg_init_one(sg, out->data, out->len); + + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg, sg, out->len, local_iv); + + ret = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); + return ret; +} + +/* + * Calculate an 
unkeyed basic hash. + */ +static int rfc3961_calc_H(const struct krb5_enctype *krb5, + const struct krb5_buffer *data, + struct krb5_buffer *digest, + gfp_t gfp) +{ + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t desc_size; + int ret = -ENOMEM; + + tfm = crypto_alloc_shash(krb5->hash_name, 0, 0); + if (IS_ERR(tfm)) + return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + + desc = kzalloc(desc_size, gfp); + if (!desc) + goto error_tfm; + + digest->len = crypto_shash_digestsize(tfm); + digest->data = kzalloc(digest->len, gfp); + if (!digest->data) + goto error_desc; + + desc->tfm = tfm; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error_digest; + + ret = crypto_shash_finup(desc, data->data, data->len, digest->data); + if (ret < 0) + goto error_digest; + + goto error_desc; + +error_digest: + kfree_sensitive(digest->data); +error_desc: + kfree_sensitive(desc); +error_tfm: + crypto_free_shash(tfm); + return ret; +} + +/* + * This is the n-fold function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. + */ +static void rfc3961_nfold(const struct krb5_buffer *source, struct krb5_buffer *result) +{ + const u8 *in = source->data; + u8 *out = result->data; + unsigned long ulcm; + unsigned int inbits, outbits; + int byte, i, msbit; + + /* the code below is more readable if I make these bytes instead of bits */ + inbits = source->len; + outbits = result->len; + + /* first compute lcm(n,k) */ + ulcm = lcm(inbits, outbits); + + /* now do the real work */ + memset(out, 0, outbits); + byte = 0; + + /* this will end up cycling through k lcm(k,n)/k times, which + * is correct. + */ + for (i = ulcm-1; i >= 0; i--) { + /* compute the msbit in k which gets added into this byte */ + msbit = ( + /* first, start with the msbit in the first, + * unrotated byte + */ + ((inbits << 3) - 1) + + /* then, for each byte, shift to the right + * for each repetition + */ + (((inbits << 3) + 13) * (i/inbits)) + + /* last, pick out the correct byte within + * that shifted repetition + */ + ((inbits - (i % inbits)) << 3) + ) % (inbits << 3); + + /* pull out the byte value itself */ + byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) | + (in[((inbits) - (msbit >> 3)) % inbits])) + >> ((msbit & 7) + 1)) & 0xff; + + /* do the addition */ + byte += out[i % outbits]; + out[i % outbits] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + + /* if there's a carry bit left over, add it back in */ + if (byte) { + for (i = outbits - 1; i >= 0; i--) { + /* do the addition */ + byte += out[i]; + out[i] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + } +} + +/* + * Calculate a derived key, DK(Base Key, Well-Known Constant) + * + * DK(Key, Constant) = random-to-key(DR(Key, Constant)) + * DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)) + * K1 = E(Key, n-fold(Constant), initial-cipher-state) + * K2 = E(Key, K1, initial-cipher-state) + * K3 = E(Key, K2, initial-cipher-state) + * K4 = ... + * DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...) 
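+ *
+ * For example, with a 16-octet block size and 32 key bytes (as for
+ * aes256-cts-hmac-sha1-96), two E() iterations are performed and
+ * DR(Key, Constant) is K1 | K2 truncated to 32 octets.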
+ * [rfc3961 sec 5.1] + */ +static int rfc3961_calc_DK(const struct krb5_enctype *krb5, + const struct krb5_buffer *inkey, + const struct krb5_buffer *in_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + unsigned int blocksize, keybytes, keylength, n; + struct krb5_buffer inblock, outblock, rawkey; + struct crypto_sync_skcipher *cipher; + int ret = -EINVAL; + + blocksize = krb5->block_len; + keybytes = krb5->key_bytes; + keylength = krb5->key_len; + + if (inkey->len != keylength || result->len != keylength) + return -EINVAL; + if (!krb5->random_to_key && result->len != keybytes) + return -EINVAL; + + cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0); + if (IS_ERR(cipher)) { + ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher); + goto err_return; + } + ret = crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len); + if (ret < 0) + goto err_free_cipher; + + ret = -ENOMEM; + inblock.data = kzalloc(blocksize * 2 + keybytes, gfp); + if (!inblock.data) + goto err_free_cipher; + + inblock.len = blocksize; + outblock.data = inblock.data + blocksize; + outblock.len = blocksize; + rawkey.data = outblock.data + blocksize; + rawkey.len = keybytes; + + /* initialize the input block */ + + if (in_constant->len == inblock.len) + memcpy(inblock.data, in_constant->data, inblock.len); + else + rfc3961_nfold(in_constant, &inblock); + + /* loop encrypting the blocks until enough key bytes are generated */ + n = 0; + while (n < rawkey.len) { + rfc3961_do_encrypt(cipher, NULL, &inblock, &outblock); + + if (keybytes - n <= outblock.len) { + memcpy(rawkey.data + n, outblock.data, keybytes - n); + break; + } + + memcpy(rawkey.data + n, outblock.data, outblock.len); + memcpy(inblock.data, outblock.data, outblock.len); + n += outblock.len; + } + + /* postprocess the key */ + if (!krb5->random_to_key) { + /* Identity random-to-key function. */ + memcpy(result->data, rawkey.data, rawkey.len); + ret = 0; + } else { + ret = krb5->random_to_key(krb5, &rawkey, result); + } + + kfree_sensitive(inblock.data); +err_free_cipher: + crypto_free_sync_skcipher(cipher); +err_return: + return ret; +} + +/* + * Calculate single encryption, E() + * + * E(Key, octets) + */ +static int rfc3961_calc_E(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *in_data, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_sync_skcipher *cipher; + int ret; + + cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0); + if (IS_ERR(cipher)) { + ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher); + goto err; + } + + ret = crypto_sync_skcipher_setkey(cipher, key->data, key->len); + if (ret < 0) + goto err_free; + + ret = rfc3961_do_encrypt(cipher, NULL, in_data, result); + +err_free: + crypto_free_sync_skcipher(cipher); +err: + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * tmp1 = H(octet-string) + * tmp2 = truncate tmp1 to multiple of m + * PRF = E(DK(protocol-key, prfconstant), tmp2, initial-cipher-state) + * + * The "prfconstant" used in the PRF operation is the three-octet string + * "prf". 
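+ *
+ * For example, with aes128-cts-hmac-sha1-96 the hash is SHA-1 (20 octets)
+ * and m is the 16-octet AES block size, so tmp2 is the first 16 octets of
+ * tmp1 and the PRF output is a single 16-octet block.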
+ * [rfc3961 sec 5.3]
+ */
+static int rfc3961_calc_PRF(const struct krb5_enctype *krb5,
+			    const struct krb5_buffer *protocol_key,
+			    const struct krb5_buffer *octet_string,
+			    struct krb5_buffer *result,
+			    gfp_t gfp)
+{
+	static const struct krb5_buffer prfconstant = { 3, "prf" };
+	struct krb5_buffer derived_key;
+	struct krb5_buffer tmp1, tmp2;
+	unsigned int m = krb5->block_len;
+	void *buffer;
+	int ret;
+
+	if (result->len != krb5->prf_len)
+		return -EINVAL;
+
+	tmp1.len = krb5->hash_len;
+	derived_key.len = krb5->key_bytes;
+	buffer = kzalloc(round16(tmp1.len) + round16(derived_key.len), gfp);
+	if (!buffer)
+		return -ENOMEM;
+
+	tmp1.data = buffer;
+	derived_key.data = buffer + round16(tmp1.len);
+
+	ret = rfc3961_calc_H(krb5, octet_string, &tmp1, gfp);
+	if (ret < 0)
+		goto err;
+
+	tmp2.len = tmp1.len & ~(m - 1);
+	tmp2.data = tmp1.data;
+
+	ret = rfc3961_calc_DK(krb5, protocol_key, &prfconstant, &derived_key, gfp);
+	if (ret < 0)
+		goto err;
+
+	ret = rfc3961_calc_E(krb5, &derived_key, &tmp2, result, gfp);
+
+err:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+/*
+ * Derive the Ke and Ki keys and package them into a key parameter that can be
+ * given to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+				const struct krb5_buffer *TK,
+				unsigned int usage,
+				struct krb5_buffer *setkey,
+				gfp_t gfp)
+{
+	struct crypto_authenc_key_param *param;
+	struct krb5_buffer Ke, Ki;
+	struct rtattr *rta;
+	int ret;
+
+	Ke.len = krb5->Ke_len;
+	Ki.len = krb5->Ki_len;
+	setkey->len = RTA_LENGTH(sizeof(*param)) + Ke.len + Ki.len;
+	setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+	if (!setkey->data)
+		return -ENOMEM;
+
+	rta = setkey->data;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = htonl(Ke.len);
+
+	Ki.data = (void *)(param + 1);
+	Ke.data = Ki.data + Ki.len;
+
+	ret = krb5_derive_Ke(krb5, TK, usage, &Ke, gfp);
+	if (ret < 0) {
+		pr_err("get_Ke failed %d\n", ret);
+		return ret;
+	}
+	ret = krb5_derive_Ki(krb5, TK, usage, &Ki, gfp);
+	if (ret < 0)
+		pr_err("get_Ki failed %d\n", ret);
+	return ret;
+}
+
+/*
+ * Package predefined Ke and Ki keys into a key parameter that can be given
+ * to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+			      const struct krb5_buffer *Ke,
+			      const struct krb5_buffer *Ki,
+			      struct krb5_buffer *setkey,
+			      gfp_t gfp)
+{
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+
+	setkey->len = RTA_LENGTH(sizeof(*param)) + Ke->len + Ki->len;
+	setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+	if (!setkey->data)
+		return -ENOMEM;
+
+	rta = setkey->data;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = htonl(Ke->len);
+	memcpy((void *)(param + 1), Ki->data, Ki->len);
+	memcpy((void *)(param + 1) + Ki->len, Ke->data, Ke->len);
+	return 0;
+}
+
+/*
+ * Derive the Kc key for checksum-only mode and package it into a key parameter
+ * that can be given to the setkey of a hash crypto object.
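+ *
+ * Unlike the encryption keys, Kc feeds a plain keyed hash, so the setkey
+ * blob is just the raw Kc_len-octet key with no rtattr wrapping.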
+ */ +int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp) +{ + int ret; + + setkey->len = krb5->Kc_len; + setkey->data = kzalloc(setkey->len, GFP_KERNEL); + if (!setkey->data) + return -ENOMEM; + + ret = krb5_derive_Kc(krb5, TK, usage, setkey, gfp); + if (ret < 0) + pr_err("get_Kc failed %d\n", ret); + return ret; +} + +/* + * Package a predefined Kc key for checksum-only mode into a key parameter that + * can be given to the setkey of a hash crypto object. + */ +int rfc3961_load_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + struct krb5_buffer *setkey, + gfp_t gfp) +{ + setkey->len = krb5->Kc_len; + setkey->data = kmemdup(Kc->data, Kc->len, GFP_KERNEL); + if (!setkey->data) + return -ENOMEM; + return 0; +} + +/* + * Apply encryption and checksumming functions to part of a scatterlist. + */ +ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + struct aead_request *req; + ssize_t ret, done; + size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset; + void *buffer; + u8 *iv; + + if (WARN_ON(data_offset != krb5->conf_len)) + return -EINVAL; /* Data is in wrong place */ + + secure_offset = 0; + base_len = krb5->conf_len + data_len; + pad_len = 0; + secure_len = base_len + pad_len; + cksum_offset = secure_len; + if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len)) + return -EFAULT; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Insert the confounder into the buffer */ + ret = -EFAULT; + if (!preconfounded) { + get_random_bytes(buffer, krb5->conf_len); + done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len, + secure_offset); + if (done != krb5->conf_len) + goto error; + } + + /* We may need to pad out to the crypto blocksize. */ + if (pad_len) { + done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len); + if (done != pad_len) + goto error; + } + + /* Hash and encrypt the message. */ + req = buffer; + iv = buffer + krb5_aead_size(aead); + + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_crypt(req, sg, sg, secure_len, iv); + ret = crypto_aead_encrypt(req); + if (ret < 0) + goto error; + + ret = secure_len + krb5->cksum_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Apply decryption and checksumming functions to a message. The offset and + * length are updated to reflect the actual content of the encrypted region. + */ +int krb5_aead_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct aead_request *req; + size_t bsize; + void *buffer; + int ret; + u8 *iv; + + if (WARN_ON(*_offset != 0)) + return -EINVAL; /* Can't set offset on aead */ + + if (*_len < krb5->conf_len + krb5->cksum_len) + return -EPROTO; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Decrypt the message and verify its checksum. 
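+	 * The AEAD (e.g. the "krb5enc" template named by krb5->encrypt_name)
+	 * treats the final cksum_len bytes as the authentication tag over
+	 * [confounder | plaintext], so a checksum mismatch shows up as
+	 * -EBADMSG from crypto_aead_decrypt().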
+	 */
+	req = buffer;
+	iv = buffer + krb5_aead_size(aead);
+
+	aead_request_set_tfm(req, aead);
+	aead_request_set_callback(req, 0, NULL, NULL);
+	aead_request_set_crypt(req, sg, sg, *_len, iv);
+	ret = crypto_aead_decrypt(req);
+	if (ret < 0)
+		goto error;
+
+	/* Adjust the boundaries of the data. */
+	*_offset += krb5->conf_len;
+	*_len -= krb5->conf_len + krb5->cksum_len;
+	ret = 0;
+
+error:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+/*
+ * Generate a checksum over some metadata and part of a buffer and insert the
+ * MIC into the buffer immediately prior to the data.
+ */
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+			struct crypto_shash *shash,
+			const struct krb5_buffer *metadata,
+			struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+			size_t data_offset, size_t data_len)
+{
+	struct shash_desc *desc;
+	ssize_t ret, done;
+	size_t bsize;
+	void *buffer, *digest;
+
+	if (WARN_ON(data_offset != krb5->cksum_len))
+		return -EMSGSIZE;
+
+	bsize = krb5_shash_size(shash) +
+		krb5_digest_size(shash);
+	buffer = kzalloc(bsize, GFP_NOFS);
+	if (!buffer)
+		return -ENOMEM;
+
+	/* Calculate the MIC with key Kc and store it into the buffer */
+	desc = buffer;
+	desc->tfm = shash;
+	ret = crypto_shash_init(desc);
+	if (ret < 0)
+		goto error;
+
+	if (metadata) {
+		ret = crypto_shash_update(desc, metadata->data, metadata->len);
+		if (ret < 0)
+			goto error;
+	}
+
+	ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+	if (ret < 0)
+		goto error;
+
+	digest = buffer + krb5_shash_size(shash);
+	ret = crypto_shash_final(desc, digest);
+	if (ret < 0)
+		goto error;
+
+	ret = -EFAULT;
+	done = sg_pcopy_from_buffer(sg, nr_sg, digest, krb5->cksum_len,
+				    data_offset - krb5->cksum_len);
+	if (done != krb5->cksum_len)
+		goto error;
+
+	ret = krb5->cksum_len + data_len;
+
+error:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+/*
+ * Check the MIC on a region of a buffer.  The offset and length are updated
+ * to reflect the actual content of the secure region.
+ */
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+		       struct crypto_shash *shash,
+		       const struct krb5_buffer *metadata,
+		       struct scatterlist *sg, unsigned int nr_sg,
+		       size_t *_offset, size_t *_len)
+{
+	struct shash_desc *desc;
+	ssize_t done;
+	size_t bsize, data_offset, data_len, offset = *_offset, len = *_len;
+	void *buffer = NULL;
+	int ret;
+	u8 *cksum, *cksum2;
+
+	if (len < krb5->cksum_len)
+		return -EPROTO;
+	data_offset = offset + krb5->cksum_len;
+	data_len = len - krb5->cksum_len;
+
+	bsize = krb5_shash_size(shash) +
+		krb5_digest_size(shash) * 2;
+	buffer = kzalloc(bsize, GFP_NOFS);
+	if (!buffer)
+		return -ENOMEM;
+
+	cksum = buffer +
+		krb5_shash_size(shash);
+	cksum2 = buffer +
+		krb5_shash_size(shash) +
+		krb5_digest_size(shash);
+
+	/* Calculate the MIC */
+	desc = buffer;
+	desc->tfm = shash;
+	ret = crypto_shash_init(desc);
+	if (ret < 0)
+		goto error;
+
+	if (metadata) {
+		ret = crypto_shash_update(desc, metadata->data, metadata->len);
+		if (ret < 0)
+			goto error;
+	}
+
+	ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+	if (ret < 0)
+		goto error;
+	ret = crypto_shash_final(desc, cksum);
+	if (ret < 0)
+		goto error;
+
+	ret = -EFAULT;
+	done = sg_pcopy_to_buffer(sg, nr_sg, cksum2, krb5->cksum_len, offset);
+	if (done != krb5->cksum_len)
+		goto error;
+
+	if (memcmp(cksum, cksum2, krb5->cksum_len) != 0) {
+		ret = -EBADMSG;
+		goto error;
+	}
+
+	*_offset += krb5->cksum_len;
+	*_len -= krb5->cksum_len;
+	ret = 0;
+
+error:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+const struct krb5_crypto_profile rfc3961_simplified_profile = {
+	.calc_PRF		= rfc3961_calc_PRF,
+	.calc_Kc		= rfc3961_calc_DK,
+	.calc_Ke		= rfc3961_calc_DK,
+	.calc_Ki		= rfc3961_calc_DK,
+	.derive_encrypt_keys	= authenc_derive_encrypt_keys,
+	.load_encrypt_keys	= authenc_load_encrypt_keys,
+	.derive_checksum_key	= rfc3961_derive_checksum_key,
+	.load_checksum_key	= rfc3961_load_checksum_key,
+	.encrypt		= krb5_aead_encrypt,
+	.decrypt		= krb5_aead_decrypt,
+	.get_mic		= rfc3961_get_mic,
+	.verify_mic		= rfc3961_verify_mic,
+};
diff --git a/crypto/krb5/rfc3962_aes.c b/crypto/krb5/rfc3962_aes.c
new file mode 100644
index 000000000000..5cbf8f4638b9
--- /dev/null
+++ b/crypto/krb5/rfc3962_aes.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3962 Advanced Encryption Standard (AES) Encryption for Kerberos 5
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization.  If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "internal.h" + +const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96 = { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128, + .name = "aes128-cts-hmac-sha1-96", + .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))", + .cksum_name = "hmac(sha1)", + .hash_name = "sha1", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 12, + .hash_len = 20, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc3961_simplified_profile, +}; + +const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96 = { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256, + .name = "aes256-cts-hmac-sha1-96", + .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))", + .cksum_name = "hmac(sha1)", + .hash_name = "sha1", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 32, + .Ke_len = 32, + .Ki_len = 32, + .block_len = 16, + .conf_len = 16, + .cksum_len = 12, + .hash_len = 20, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc3961_simplified_profile, +}; diff --git a/crypto/krb5/rfc6803_camellia.c b/crypto/krb5/rfc6803_camellia.c new file mode 100644 index 000000000000..77cd4ce023f1 --- /dev/null +++ b/crypto/krb5/rfc6803_camellia.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* rfc6803 Camellia Encryption for Kerberos 5 + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include "internal.h" + +/* + * Calculate the key derivation function KDF-FEEDBACK_CMAC(key, constant) + * + * n = ceiling(k / 128) + * K(0) = zeros + * K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k) + * DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n)) + * KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant)) + * + * [rfc6803 sec 3] + */ +static int rfc6803_calc_KDF_FEEDBACK_CMAC(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_shash *shash; + struct krb5_buffer K, data; + struct shash_desc *desc; + __be32 tmp; + size_t bsize, offset, seg; + void *buffer; + u32 i = 0, k = result->len * 8; + u8 *p; + int ret = -ENOMEM; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash); + ret = crypto_shash_setkey(shash, key->data, key->len); + if (ret < 0) + goto error_shash; + + ret = -ENOMEM; + K.len = crypto_shash_digestsize(shash); + data.len = K.len + 4 + constant->len + 1 + 4; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(K.len) + + crypto_roundup(data.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto error_shash; + + desc = buffer; + desc->tfm = shash; + + K.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + data.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(K.len); + + p = data.data + K.len + 4; + memcpy(p, constant->data, constant->len); + p += constant->len; + *p++ = 0x00; + tmp = htonl(k); + memcpy(p, &tmp, 4); + p += 4; + + ret = -EINVAL; + if (WARN_ON(p - (u8 *)data.data != data.len)) + goto error; + + offset = 0; + do { + i++; + p = data.data; + memcpy(p, K.data, K.len); + p += K.len; + *(__be32 *)p = htonl(i); + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + ret = crypto_shash_finup(desc, data.data, data.len, K.data); + if (ret < 0) + goto error; + + seg = min_t(size_t, result->len - offset, K.len); + memcpy(result->data + offset, K.data, seg); + offset += seg; + } while (offset < result->len); + +error: + kfree_sensitive(buffer); +error_shash: + crypto_free_shash(shash); + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf") + * PRF = CMAC(Kp, octet-string) + * [rfc6803 sec 6] + */ +static int rfc6803_calc_PRF(const struct krb5_enctype *krb5, + const struct krb5_buffer *protocol_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp) +{ + static const struct krb5_buffer prfconstant = { 3, "prf" }; + struct crypto_shash *shash; + struct krb5_buffer Kp; + struct shash_desc *desc; + size_t bsize; + void *buffer; + int ret; + + Kp.len = krb5->prf_len; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? 
-ENOPKG : PTR_ERR(shash); + + ret = -EINVAL; + if (result->len != crypto_shash_digestsize(shash)) + goto out_shash; + + ret = -ENOMEM; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(Kp.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto out_shash; + + Kp.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + + ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, protocol_key, &prfconstant, + &Kp, gfp); + if (ret < 0) + goto out; + + ret = crypto_shash_setkey(shash, Kp.data, Kp.len); + if (ret < 0) + goto out; + + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto out; + + ret = crypto_shash_finup(desc, octet_string->data, octet_string->len, result->data); + if (ret < 0) + goto out; + +out: + kfree_sensitive(buffer); +out_shash: + crypto_free_shash(shash); + return ret; +} + + +static const struct krb5_crypto_profile rfc6803_crypto_profile = { + .calc_PRF = rfc6803_calc_PRF, + .calc_Kc = rfc6803_calc_KDF_FEEDBACK_CMAC, + .calc_Ke = rfc6803_calc_KDF_FEEDBACK_CMAC, + .calc_Ki = rfc6803_calc_KDF_FEEDBACK_CMAC, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = krb5_aead_encrypt, + .decrypt = krb5_aead_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; + +const struct krb5_enctype krb5_camellia128_cts_cmac = { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA128, + .name = "camellia128-cts-cmac", + .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .cksum_name = "cmac(camellia)", + .hash_name = NULL, + .derivation_enc = "cts(cbc(camellia))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 16, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc6803_crypto_profile, +}; + +const struct krb5_enctype krb5_camellia256_cts_cmac = { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA256, + .name = "camellia256-cts-cmac", + .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .cksum_name = "cmac(camellia)", + .hash_name = NULL, + .derivation_enc = "cts(cbc(camellia))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 32, + .Ke_len = 32, + .Ki_len = 32, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 16, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc6803_crypto_profile, +}; diff --git a/crypto/krb5/rfc8009_aes2.c b/crypto/krb5/rfc8009_aes2.c new file mode 100644 index 000000000000..d39851fc3a4e --- /dev/null +++ b/crypto/krb5/rfc8009_aes2.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* rfc8009 AES Encryption with HMAC-SHA2 for Kerberos 5 + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <crypto/authenc.h> +#include "internal.h" + +static const struct krb5_buffer rfc8009_no_context = { .len = 0, .data = "" }; + +/* + * Calculate the key derivation function KDF-HMAC-SHA2(key, label, [context,] k) + * + * KDF-HMAC-SHA2(key, label, [context,] k) = k-truncate(K1) + * + * Using the appropriate one of: + * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | k) + * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | k) + * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | context | k) + * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | context | k) + * [rfc8009 sec 3] + */ +static int rfc8009_calc_KDF_HMAC_SHA2(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *label, + const struct krb5_buffer *context, + unsigned int k, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_shash *shash; + struct krb5_buffer K1, data; + struct shash_desc *desc; + __be32 tmp; + size_t bsize; + void *buffer; + u8 *p; + int ret = -ENOMEM; + + if (WARN_ON(result->len != k / 8)) + return -EINVAL; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash); + ret = crypto_shash_setkey(shash, key->data, key->len); + if (ret < 0) + goto error_shash; + + ret = -EINVAL; + if (WARN_ON(crypto_shash_digestsize(shash) * 8 < k)) + goto error_shash; + + ret = -ENOMEM; + data.len = 4 + label->len + 1 + context->len + 4; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(data.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto error_shash; + + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + p = data.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + *(__be32 *)p = htonl(0x00000001); + p += 4; + memcpy(p, label->data, label->len); + p += label->len; + *p++ = 0; + memcpy(p, context->data, context->len); + p += context->len; + tmp = htonl(k); + memcpy(p, &tmp, 4); + p += 4; + + ret = -EINVAL; + if (WARN_ON(p - (u8 *)data.data != data.len)) + goto error; + + K1.len = crypto_shash_digestsize(shash); + K1.data = buffer + + krb5_shash_size(shash); + + ret = crypto_shash_finup(desc, data.data, data.len, K1.data); + if (ret < 0) + goto error; + + memcpy(result->data, K1.data, result->len); + +error: + kfree_sensitive(buffer); +error_shash: + crypto_free_shash(shash); + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 256) + * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 384) + * + * The "prfconstant" used in the PRF operation is the three-octet string + * "prf". + * [rfc8009 sec 5] + */ +static int rfc8009_calc_PRF(const struct krb5_enctype *krb5, + const struct krb5_buffer *input_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp) +{ + static const struct krb5_buffer prfconstant = { 3, "prf" }; + + return rfc8009_calc_KDF_HMAC_SHA2(krb5, input_key, &prfconstant, + octet_string, krb5->prf_len * 8, + result, gfp); +} + +/* + * Derive Ke. 
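+ * The usage constant is the 32-bit big-endian usage number with the octet
+ * 0xAA appended (as built by krb5_derive_Ke()), and k below is the key size
+ * in bits:
+ *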
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128) + * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256) + * [rfc8009 sec 5] + */ +static int rfc8009_calc_Ke(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant, + &rfc8009_no_context, krb5->key_bytes * 8, + result, gfp); +} + +/* + * Derive Kc/Ki + * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128) + * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128) + * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192) + * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192) + * [rfc8009 sec 5] + */ +static int rfc8009_calc_Ki(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant, + &rfc8009_no_context, krb5->cksum_len * 8, + result, gfp); +} + +/* + * Apply encryption and checksumming functions to a message. Unlike for + * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first. + */ +static ssize_t rfc8009_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + struct aead_request *req; + struct scatterlist bsg[2]; + ssize_t ret, done; + size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset; + void *buffer; + u8 *iv, *ad; + + if (WARN_ON(data_offset != krb5->conf_len)) + return -EINVAL; /* Data is in wrong place */ + + secure_offset = 0; + base_len = krb5->conf_len + data_len; + pad_len = 0; + secure_len = base_len + pad_len; + cksum_offset = secure_len; + if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len)) + return -EFAULT; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + req = buffer; + iv = buffer + krb5_aead_size(aead); + ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead); + + /* Insert the confounder into the buffer */ + ret = -EFAULT; + if (!preconfounded) { + get_random_bytes(buffer, krb5->conf_len); + done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len, + secure_offset); + if (done != krb5->conf_len) + goto error; + } + + /* We may need to pad out to the crypto blocksize. */ + if (pad_len) { + done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len); + if (done != pad_len) + goto error; + } + + /* We need to include the starting IV in the hash. */ + sg_init_table(bsg, 2); + sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead)); + sg_chain(bsg, 2, sg); + + /* Hash and encrypt the message. */ + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, krb5_aead_ivsize(aead)); + aead_request_set_crypt(req, bsg, bsg, secure_len, iv); + ret = crypto_aead_encrypt(req); + if (ret < 0) + goto error; + + ret = secure_len + krb5->cksum_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Apply decryption and checksumming functions to a message. Unlike for + * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first. + * + * The offset and length are updated to reflect the actual content of the + * encrypted region. 
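+ *
+ * On entry the scatterlist covers [confounder | ciphertext | tag].  The
+ * starting IV (all zeros here) is supplied as associated data only, so it is
+ * covered by the integrity check but never stored in the message.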
+ */ +static int rfc8009_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct aead_request *req; + struct scatterlist bsg[2]; + size_t bsize; + void *buffer; + int ret; + u8 *iv, *ad; + + if (WARN_ON(*_offset != 0)) + return -EINVAL; /* Can't set offset on aead */ + + if (*_len < krb5->conf_len + krb5->cksum_len) + return -EPROTO; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + req = buffer; + iv = buffer + krb5_aead_size(aead); + ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead); + + /* We need to include the starting IV in the hash. */ + sg_init_table(bsg, 2); + sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead)); + sg_chain(bsg, 2, sg); + + /* Decrypt the message and verify its checksum. */ + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, krb5_aead_ivsize(aead)); + aead_request_set_crypt(req, bsg, bsg, *_len, iv); + ret = crypto_aead_decrypt(req); + if (ret < 0) + goto error; + + /* Adjust the boundaries of the data. */ + *_offset += krb5->conf_len; + *_len -= krb5->conf_len + krb5->cksum_len; + ret = 0; + +error: + kfree_sensitive(buffer); + return ret; +} + +static const struct krb5_crypto_profile rfc8009_crypto_profile = { + .calc_PRF = rfc8009_calc_PRF, + .calc_Kc = rfc8009_calc_Ki, + .calc_Ke = rfc8009_calc_Ke, + .calc_Ki = rfc8009_calc_Ki, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = rfc8009_encrypt, + .decrypt = rfc8009_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; + +const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128 = { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128, + .name = "aes128-cts-hmac-sha256-128", + .encrypt_name = "authenc(hmac(sha256),cts(cbc(aes)))", + .cksum_name = "hmac(sha256)", + .hash_name = "sha256", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 20, + .prf_len = 32, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc8009_crypto_profile, +}; + +const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192 = { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256, + .name = "aes256-cts-hmac-sha384-192", + .encrypt_name = "authenc(hmac(sha384),cts(cbc(aes)))", + .cksum_name = "hmac(sha384)", + .hash_name = "sha384", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 24, + .Ke_len = 32, + .Ki_len = 24, + .block_len = 16, + .conf_len = 16, + .cksum_len = 24, + .hash_len = 20, + .prf_len = 48, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc8009_crypto_profile, +}; diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c new file mode 100644 index 000000000000..4519c572d37e --- /dev/null +++ b/crypto/krb5/selftest.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos library self-testing + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +#define VALID(X) \ + ({ \ + bool __x = (X); \ + if (__x) { \ + pr_warn("!!! TESTINVAL %s:%u\n", __FILE__, __LINE__); \ + ret = -EBADMSG; \ + } \ + __x; \ + }) + +#define CHECK(X) \ + ({ \ + bool __x = (X); \ + if (__x) { \ + pr_warn("!!! TESTFAIL %s:%u\n", __FILE__, __LINE__); \ + ret = -EBADMSG; \ + } \ + __x; \ + }) + +enum which_key { + TEST_KC, TEST_KE, TEST_KI, +}; + +#if 0 +static void dump_sg(struct scatterlist *sg, unsigned int limit) +{ + unsigned int index = 0, n = 0; + + for (; sg && limit > 0; sg = sg_next(sg)) { + unsigned int off = sg->offset, len = umin(sg->length, limit); + const void *p = kmap_local_page(sg_page(sg)); + + limit -= len; + while (len > 0) { + unsigned int part = umin(len, 32); + + pr_notice("[%x] %04x: %*phN\n", n, index, part, p + off); + index += part; + off += part; + len -= part; + } + + kunmap_local(p); + n++; + } +} +#endif + +static int prep_buf(struct krb5_buffer *buf) +{ + buf->data = kmalloc(buf->len, GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + return 0; +} + +#define PREP_BUF(BUF, LEN) \ + do { \ + (BUF)->len = (LEN); \ + ret = prep_buf((BUF)); \ + if (ret < 0) \ + goto out; \ + } while (0) + +static int load_buf(struct krb5_buffer *buf, const char *from) +{ + size_t len = strlen(from); + int ret; + + if (len > 1 && from[0] == '\'') { + PREP_BUF(buf, len - 1); + memcpy(buf->data, from + 1, len - 1); + ret = 0; + goto out; + } + + if (VALID(len & 1)) + return -EINVAL; + + PREP_BUF(buf, len / 2); + ret = hex2bin(buf->data, from, buf->len); + if (ret < 0) { + VALID(1); + goto out; + } +out: + return ret; +} + +#define LOAD_BUF(BUF, FROM) do { ret = load_buf(BUF, FROM); if (ret < 0) goto out; } while (0) + +static void clear_buf(struct krb5_buffer *buf) +{ + kfree(buf->data); + buf->len = 0; + buf->data = NULL; +} + +/* + * Perform a pseudo-random function check. + */ +static int krb5_test_one_prf(const struct krb5_prf_test *test) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct krb5_buffer key = {}, octet = {}, result = {}, prf = {}; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + LOAD_BUF(&key, test->key); + LOAD_BUF(&octet, test->octet); + LOAD_BUF(&prf, test->prf); + PREP_BUF(&result, krb5->prf_len); + + if (VALID(result.len != prf.len)) { + ret = -EINVAL; + goto out; + } + + ret = krb5->profile->calc_PRF(krb5, &key, &octet, &result, GFP_KERNEL); + if (ret < 0) { + CHECK(1); + pr_warn("PRF calculation failed %d\n", ret); + goto out; + } + + if (memcmp(result.data, prf.data, result.len) != 0) { + CHECK(1); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&result); + clear_buf(&prf); + clear_buf(&octet); + clear_buf(&key); + return ret; +} + +/* + * Perform a key derivation check. 
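+ *
+ * Each of Kc, Ke and Ki is derived from the test vector's base key and usage
+ * number and compared against the expected key bytes.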
+ */ +static int krb5_test_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_key_test_one *test, + enum which_key which) +{ + struct krb5_buffer key = {}, result = {}; + int ret; + + LOAD_BUF(&key, test->key); + PREP_BUF(&result, key.len); + + switch (which) { + case TEST_KC: + ret = krb5_derive_Kc(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + case TEST_KE: + ret = krb5_derive_Ke(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + case TEST_KI: + ret = krb5_derive_Ki(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + default: + VALID(1); + ret = -EINVAL; + goto out; + } + + if (ret < 0) { + CHECK(1); + pr_warn("Key derivation failed %d\n", ret); + goto out; + } + + if (memcmp(result.data, key.data, result.len) != 0) { + CHECK(1); + ret = -EKEYREJECTED; + goto out; + } + +out: + clear_buf(&key); + clear_buf(&result); + return ret; +} + +static int krb5_test_one_key(const struct krb5_key_test *test) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct krb5_buffer base_key = {}; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + LOAD_BUF(&base_key, test->key); + + ret = krb5_test_key(krb5, &base_key, &test->Kc, TEST_KC); + if (ret < 0) + goto out; + ret = krb5_test_key(krb5, &base_key, &test->Ke, TEST_KE); + if (ret < 0) + goto out; + ret = krb5_test_key(krb5, &base_key, &test->Ki, TEST_KI); + if (ret < 0) + goto out; + +out: + clear_buf(&base_key); + return ret; +} + +/* + * Perform an encryption test. + */ +static int krb5_test_one_enc(const struct krb5_enc_test *test, void *buf) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct crypto_aead *ci = NULL; + struct krb5_buffer K0 = {}, Ke = {}, Ki = {}, keys = {}; + struct krb5_buffer conf = {}, plain = {}, ct = {}; + struct scatterlist sg[1]; + size_t data_len, data_offset, message_len; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + /* Load the test data into binary buffers. */ + LOAD_BUF(&conf, test->conf); + LOAD_BUF(&plain, test->plain); + LOAD_BUF(&ct, test->ct); + + if (test->K0) { + LOAD_BUF(&K0, test->K0); + } else { + LOAD_BUF(&Ke, test->Ke); + LOAD_BUF(&Ki, test->Ki); + + ret = krb5->profile->load_encrypt_keys(krb5, &Ke, &Ki, &keys, GFP_KERNEL); + if (ret < 0) + goto out; + } + + if (VALID(conf.len != krb5->conf_len) || + VALID(ct.len != krb5->conf_len + plain.len + krb5->cksum_len)) + goto out; + + data_len = plain.len; + message_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE, + data_len, &data_offset); + + if (CHECK(message_len != ct.len)) { + pr_warn("Encrypted length mismatch %zu != %u\n", message_len, ct.len); + goto out; + } + if (CHECK(data_offset != conf.len)) { + pr_warn("Data offset mismatch %zu != %u\n", data_offset, conf.len); + goto out; + } + + memcpy(buf, conf.data, conf.len); + memcpy(buf + data_offset, plain.data, plain.len); + + /* Allocate a crypto object and set its key. */ + if (test->K0) + ci = crypto_krb5_prepare_encryption(krb5, &K0, test->usage, GFP_KERNEL); + else + ci = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL); + + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + ci = NULL; + pr_err("Couldn't alloc AEAD %s: %d\n", krb5->encrypt_name, ret); + goto out; + } + + /* Encrypt the message. 
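+	 * The buffer already holds [confounder | plaintext], hence
+	 * preconfounded = true; the checksum is added in place and the
+	 * result is compared byte-for-byte with the expected ciphertext.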
*/ + sg_init_one(sg, buf, message_len); + ret = crypto_krb5_encrypt(krb5, ci, sg, 1, message_len, + data_offset, data_len, true); + if (ret < 0) { + CHECK(1); + pr_warn("Encryption failed %d\n", ret); + goto out; + } + if (ret != message_len) { + CHECK(1); + pr_warn("Encrypted message wrong size %x != %zx\n", ret, message_len); + goto out; + } + + if (memcmp(buf, ct.data, ct.len) != 0) { + CHECK(1); + pr_warn("Ciphertext mismatch\n"); + pr_warn("BUF %*phN\n", ct.len, buf); + pr_warn("CT %*phN\n", ct.len, ct.data); + pr_warn("PT %*phN%*phN\n", conf.len, conf.data, plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + /* Decrypt the encrypted message. */ + data_offset = 0; + data_len = message_len; + ret = crypto_krb5_decrypt(krb5, ci, sg, 1, &data_offset, &data_len); + if (ret < 0) { + CHECK(1); + pr_warn("Decryption failed %d\n", ret); + goto out; + } + + if (CHECK(data_offset != conf.len) || + CHECK(data_len != plain.len)) + goto out; + + if (memcmp(buf, conf.data, conf.len) != 0) { + CHECK(1); + pr_warn("Confounder mismatch\n"); + pr_warn("ENC %*phN\n", conf.len, buf); + pr_warn("DEC %*phN\n", conf.len, conf.data); + ret = -EKEYREJECTED; + goto out; + } + + if (memcmp(buf + conf.len, plain.data, plain.len) != 0) { + CHECK(1); + pr_warn("Plaintext mismatch\n"); + pr_warn("BUF %*phN\n", plain.len, buf + conf.len); + pr_warn("PT %*phN\n", plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&ct); + clear_buf(&plain); + clear_buf(&conf); + clear_buf(&keys); + clear_buf(&Ki); + clear_buf(&Ke); + clear_buf(&K0); + if (ci) + crypto_free_aead(ci); + return ret; +} + +/* + * Perform a checksum test. + */ +static int krb5_test_one_mic(const struct krb5_mic_test *test, void *buf) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct crypto_shash *ci = NULL; + struct scatterlist sg[1]; + struct krb5_buffer K0 = {}, Kc = {}, keys = {}, plain = {}, mic = {}; + size_t offset, len, message_len; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + /* Allocate a crypto object and set its key. */ + if (test->K0) { + LOAD_BUF(&K0, test->K0); + ci = crypto_krb5_prepare_checksum(krb5, &K0, test->usage, GFP_KERNEL); + } else { + LOAD_BUF(&Kc, test->Kc); + + ret = krb5->profile->load_checksum_key(krb5, &Kc, &keys, GFP_KERNEL); + if (ret < 0) + goto out; + + ci = krb5_prepare_checksum(krb5, &Kc, GFP_KERNEL); + } + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + ci = NULL; + pr_err("Couldn't alloc shash %s: %d\n", krb5->cksum_name, ret); + goto out; + } + + /* Load the test data into binary buffers. */ + LOAD_BUF(&plain, test->plain); + LOAD_BUF(&mic, test->mic); + + len = plain.len; + message_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE, + len, &offset); + + if (CHECK(message_len != mic.len + plain.len)) { + pr_warn("MIC length mismatch %zu != %u\n", + message_len, mic.len + plain.len); + goto out; + } + + memcpy(buf + offset, plain.data, plain.len); + + /* Generate a MIC generation request. 
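+	 *
+	 * crypto_krb5_get_mic() also works in place: the plaintext was
+	 * copied in at 'offset' (the checksum length) and the MIC is
+	 * written ahead of it, yielding [ MIC | plaintext ]; the return
+	 * value is the total length of that region.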
*/ + sg_init_one(sg, buf, 1024); + + ret = crypto_krb5_get_mic(krb5, ci, NULL, sg, 1, 1024, + krb5->cksum_len, plain.len); + if (ret < 0) { + CHECK(1); + pr_warn("Get MIC failed %d\n", ret); + goto out; + } + len = ret; + + if (CHECK(len != plain.len + mic.len)) { + pr_warn("MIC length mismatch %zu != %u\n", len, plain.len + mic.len); + goto out; + } + + if (memcmp(buf, mic.data, mic.len) != 0) { + CHECK(1); + pr_warn("MIC mismatch\n"); + pr_warn("BUF %*phN\n", mic.len, buf); + pr_warn("MIC %*phN\n", mic.len, mic.data); + ret = -EKEYREJECTED; + goto out; + } + + /* Generate a verification request. */ + offset = 0; + ret = crypto_krb5_verify_mic(krb5, ci, NULL, sg, 1, &offset, &len); + if (ret < 0) { + CHECK(1); + pr_warn("Verify MIC failed %d\n", ret); + goto out; + } + + if (CHECK(offset != mic.len) || + CHECK(len != plain.len)) + goto out; + + if (memcmp(buf + offset, plain.data, plain.len) != 0) { + CHECK(1); + pr_warn("Plaintext mismatch\n"); + pr_warn("BUF %*phN\n", plain.len, buf + offset); + pr_warn("PT %*phN\n", plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&mic); + clear_buf(&plain); + clear_buf(&keys); + clear_buf(&K0); + clear_buf(&Kc); + if (ci) + crypto_free_shash(ci); + return ret; +} + +int krb5_selftest(void) +{ + void *buf; + int ret = 0, i; + + buf = kmalloc(4096, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pr_notice("\n"); + pr_notice("Running selftests\n"); + + for (i = 0; krb5_prf_tests[i].name; i++) { + ret = krb5_test_one_prf(&krb5_prf_tests[i]); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_prf_tests[i].name); + } + } + + for (i = 0; krb5_key_tests[i].name; i++) { + ret = krb5_test_one_key(&krb5_key_tests[i]); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_key_tests[i].name); + } + } + + for (i = 0; krb5_enc_tests[i].name; i++) { + memset(buf, 0x5a, 4096); + ret = krb5_test_one_enc(&krb5_enc_tests[i], buf); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_enc_tests[i].name); + } + } + + for (i = 0; krb5_mic_tests[i].name; i++) { + memset(buf, 0x5a, 4096); + ret = krb5_test_one_mic(&krb5_mic_tests[i], buf); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_mic_tests[i].name); + } + } + + ret = 0; +out: + pr_notice("Selftests %s\n", ret == 0 ? "succeeded" : "failed"); + kfree(buf); + return ret; +} diff --git a/crypto/krb5/selftest_data.c b/crypto/krb5/selftest_data.c new file mode 100644 index 000000000000..24447ee8bf07 --- /dev/null +++ b/crypto/krb5/selftest_data.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Data for Kerberos library self-testing + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "internal.h" + +/* + * Pseudo-random function tests. 
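+ *
+ * Vector strings in this file are hex unless they begin with a single
+ * quote, in which case the remainder is taken as literal bytes (see
+ * load_buf() in selftest.c).  "74657374" below is simply the hex
+ * encoding of the ASCII string "test".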
+ */ +const struct krb5_prf_test krb5_prf_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "prf", + .key = "3705D96080C17728A0E800EAB6E0D23C", + .octet = "74657374", + .prf = "9D188616F63852FE86915BB840B4A886FF3E6BB0F819B49B893393D393854295", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "prf", + .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52", + .octet = "74657374", + .prf = + "9801F69A368C2BF675E59521E177D9A07F67EFE1CFDE8D3C8D6F6A0256E3B17D" + "B3C1B62AD1B8553360D17367EB1514D2", + }, + {/* END */} +}; + +/* + * Key derivation tests. + */ +const struct krb5_key_test krb5_key_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "key", + .key = "3705D96080C17728A0E800EAB6E0D23C", + .Kc.use = 0x00000002, + .Kc.key = "B31A018A48F54776F403E9A396325DC3", + .Ke.use = 0x00000002, + .Ke.key = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki.use = 0x00000002, + .Ki.key = "9FDA0E56AB2D85E1569A688696C26A6C", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "key", + .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52", + .Kc.use = 0x00000002, + .Kc.key = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D", + .Ke.use = 0x00000002, + .Ke.key = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki.use = 0x00000002, + .Ki.key = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "key", + .key = "57D0297298FFD9D35DE5A47FB4BDE24B", + .Kc.use = 0x00000002, + .Kc.key = "D155775A209D05F02B38D42A389E5A56", + .Ke.use = 0x00000002, + .Ke.key = "64DF83F85A532F17577D8C37035796AB", + .Ki.use = 0x00000002, + .Ki.key = "3E4FBDF30FB8259C425CB6C96F1F4635", + }, + { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "key", + .key = "B9D6828B2056B7BE656D88A123B1FAC68214AC2B727ECF5F69AFE0C4DF2A6D2C", + .Kc.use = 0x00000002, + .Kc.key = "E467F9A9552BC7D3155A6220AF9C19220EEED4FF78B0D1E6A1544991461A9E50", + .Ke.use = 0x00000002, + .Ke.key = "412AEFC362A7285FC3966C6A5181E7605AE675235B6D549FBFC9AB6630A4C604", + .Ki.use = 0x00000002, + .Ki.key = "FA624FA0E523993FA388AEFDC67E67EBCD8C08E8A0246B1D73B0D1DD9FC582B0", + }, + {/* END */} +}; + +/* + * Encryption tests. 
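+ *
+ * The SHA-2 vectors (RFC 8009) supply the pre-derived Ke and Ki, while
+ * the Camellia vectors (RFC 6803) supply the base key K0 plus a usage
+ * number and leave the derivation to the library.  In both cases the
+ * expected ciphertext covers confounder plus plaintext, with the
+ * integrity checksum appended.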
+ */ +const struct krb5_enc_test krb5_enc_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc no plain", + .plain = "", + .conf = "7E5895EAF2672435BAD817F545A37148", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "EF85FB890BB8472F4DAB20394DCA781DAD877EDA39D50C870C0D5A0A8E48C718", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain<block", + .plain = "000102030405", + .conf = "7BCA285E2FD4130FB55B1A5C83BC5B24", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7E6877CE99E247E52D16ED4421DFDF8976C", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain==block", + .plain = "000102030405060708090A0B0C0D0E0F", + .conf = "56AB21713FF62C0A1457200F6FA9948F", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "3517D640F50DDC8AD3628722B3569D2AE07493FA8263254080EA65C1008E8FC295FB4852E7D83E1E7C48C37EEBE6B0D3", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain>block", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .conf = "A7A4E29A4728CE10664FB64E49AD3FAC", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "720F73B18D9859CD6CCB4346115CD336C70F58EDC0C4437C5573544C31C813BCE1E6D072C186B39A413C2F92CA9B8334A287FFCBFC", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc no plain", + .plain = "", + .conf = "F764E9FA15C276478B2C7D0C4E5F58E4", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "41F53FA5BFE7026D91FAF9BE959195A058707273A96A40F0A01960621AC612748B9BBFBE7EB4CE3C", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain<block", + .plain = "000102030405", + .conf = "B80D3251C1F6471494256FFE712D0B9A", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "4ED7B37C2BCAC8F74F23C1CF07E62BC7B75FB3F637B9F559C7F664F69EAB7B6092237526EA0D1F61CB20D69D10F2", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain==block", + .plain = "000102030405060708090A0B0C0D0E0F", + .conf = "53BF8A0D105265D4E276428624CE5E63", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "BC47FFEC7998EB91E8115CF8D19DAC4BBBE2E163E87DD37F49BECA92027764F68CF51F14D798C2273F35DF574D1F932E40C4FF255B36A266", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain>block", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .conf = "763E65367E864F02F55153C7E3B58AF1", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "40013E2DF58E8751957D2878BCD2D6FE101CCFD556CB1EAE79DB3C3EE86429F2B2A602AC86FEF6ECB647D6295FAE077A1FEB517508D2C16B4192E01F62", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc no plain", + .plain = "", + .conf = "B69822A19A6B09C0EBC8557D1F1B6C0A", + .K0 = "1DC46A8D763F4F93742BCBA3387576C3", + .usage = 0, + .ct = "C466F1871069921EDB7C6FDE244A52DB0BA10EDC197BDB8006658CA3CCCE6EB8", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 1 plain", 
+ .plain = "'1", + .conf = "6F2FC3C2A166FD8898967A83DE9596D9", + .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C", + .usage = 1, + .ct = "842D21FD950311C0DD464A3F4BE8D6DA88A56D559C9B47D3F9A85067AF661559B8", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 9 plain", + .plain = "'9 bytesss", + .conf = "A5B4A71E077AEEF93C8763C18FDB1F10", + .K0 = "A1BB61E805F9BA6DDE8FDBDDC05CDEA0", + .usage = 2, + .ct = "619FF072E36286FF0A28DEB3A352EC0D0EDF5C5160D663C901758CCF9D1ED33D71DB8F23AABF8348A0", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 13 plain", + .plain = "'13 bytes byte", + .conf = "19FEE40D810C524B5B22F01874C693DA", + .K0 = "2CA27A5FAF5532244506434E1CEF6676", + .usage = 3, + .ct = "B8ECA3167AE6315512E59F98A7C500205E5F63FF3BB389AF1C41A21D640D8615C9ED3FBEB05AB6ACB67689B5EA", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 30 plain", + .plain = "'30 bytes bytes bytes bytes byt", + .conf = "CA7A7AB4BE192DABD603506DB19C39E2", + .K0 = "7824F8C16F83FF354C6BF7515B973F43", + .usage = 4, + .ct = "A26A3905A4FFD5816B7B1E27380D08090C8EC1F304496E1ABDCD2BDCD1DFFC660989E117A713DDBB57A4146C1587CBA4356665591D2240282F5842B105A5", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc no plain", + .plain = "", + .conf = "3CBBD2B45917941067F96599BB98926C", + .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B", + .usage = 0, + .ct = "03886D03310B47A6D8F06D7B94D1DD837ECCE315EF652AFF620859D94A259266", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 1 plain", + .plain = "'1", + .conf = "DEF487FCEBE6DE6346D4DA4521BBA2D2", + .K0 = "1B97FE0A190E2021EB30753E1B6E1E77B0754B1D684610355864104963463833", + .usage = 1, + .ct = "2C9C1570133C99BF6A34BC1B0212002FD194338749DB4135497A347CFCD9D18A12", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 9 plain", + .plain = "'9 bytesss", + .conf = "AD4FF904D34E555384B14100FC465F88", + .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05", + .usage = 2, + .ct = "9C6DE75F812DE7ED0D28B2963557A115640998275B0AF5152709913FF52A2A9C8E63B872F92E64C839", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 13 plain", + .plain = "'13 bytes byte", + .conf = "CF9BCA6DF1144E0C0AF9B8F34C90D514", + .K0 = "B038B132CD8E06612267FAB7170066D88AECCBA0B744BFC60DC89BCA182D0715", + .usage = 3, + .ct = "EEEC85A9813CDC536772AB9B42DEFC5706F726E975DDE05A87EB5406EA324CA185C9986B42AABE794B84821BEE", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 30 plain", + .plain = "'30 bytes bytes bytes bytes byt", + .conf = "644DEF38DA35007275878D216855E228", + .K0 = "CCFCD349BF4C6677E86E4B02B8EAB924A546AC731CF9BF6989B996E7D6BFBBA7", + .usage = 4, + .ct = "0E44680985855F2D1F1812529CA83BFD8E349DE6FD9ADA0BAAA048D68E265FEBF34AD1255A344999AD37146887A6C6845731AC7F46376A0504CD06571474", + }, + {/* END */} +}; + +/* + * Checksum generation tests. 
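+ *
+ * Mirroring the encryption vectors above: the SHA-2 enctypes provide a
+ * pre-derived Kc, while the Camellia enctypes provide K0 plus a usage
+ * number and have the checksum key derived internally.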
+ */ +const struct krb5_mic_test krb5_mic_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "mic", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .Kc = "B31A018A48F54776F403E9A396325DC3", + .mic = "D78367186643D67B411CBA9139FC1DEE", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "mic", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .Kc = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D", + .mic = "45EE791567EEFCA37F4AC1E0222DE80D43C3BFA06699672A", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "mic abc", + .plain = "'abcdefghijk", + .K0 = "1DC46A8D763F4F93742BCBA3387576C3", + .usage = 7, + .mic = "1178E6C5C47A8C1AE0C4B9C7D4EB7B6B", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "mic ABC", + .plain = "'ABCDEFGHIJKLMNOPQRSTUVWXYZ", + .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C", + .usage = 8, + .mic = "D1B34F7004A731F23A0C00BF6C3F753A", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "mic 123", + .plain = "'123456789", + .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B", + .usage = 9, + .mic = "87A12CFD2B96214810F01C826E7744B1", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "mic !@#", + .plain = "'!@#$%^&*()!@#$%^&*()!@#$%^&*()", + .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05", + .usage = 10, + .mic = "3FA0B42355E52B189187294AA252AB64", + }, + {/* END */} +}; diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c new file mode 100644 index 000000000000..a1de55994d92 --- /dev/null +++ b/crypto/krb5enc.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AEAD wrapper for Kerberos 5 RFC3961 simplified profile. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * Derived from authenc: + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/authenc.h> +#include <crypto/scatterwalk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct krb5enc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct krb5enc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; +}; + +struct krb5enc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[]; +}; + +static void krb5enc_request_complete(struct aead_request *req, int err) +{ + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + +/** + * crypto_krb5enc_extractkeys - Extract Ke and Ki keys from the key blob. + * @keys: Where to put the key sizes and pointers + * @key: Encoded key material + * @keylen: Amount of key material + * + * Decode the key blob we're given. It starts with an rtattr that indicates + * the format and the length. Format CRYPTO_AUTHENC_KEYA_PARAM is: + * + * rtattr || __be32 enckeylen || authkey || enckey + * + * Note that the rtattr is in cpu-endian form, unlike enckeylen. This must be + * handled correctly in static testmgr data. 
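+ *
+ * A caller building such a blob by hand would do roughly the following
+ * (sketch only; 'blob' stands for a sufficiently large u8 buffer and
+ * error handling is elided):
+ *
+ *	struct crypto_authenc_key_param *param;
+ *	struct rtattr *rta = (struct rtattr *)blob;
+ *
+ *	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ *	rta->rta_len = RTA_LENGTH(sizeof(*param));
+ *	param = RTA_DATA(rta);
+ *	param->enckeylen = cpu_to_be32(enckeylen);
+ *	memcpy(blob + rta->rta_len, authkey, authkeylen);
+ *	memcpy(blob + rta->rta_len + authkeylen, enckey, enckeylen);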
+ */ +int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen) +{ + struct rtattr *rta = (struct rtattr *)key; + struct crypto_authenc_key_param *param; + + if (!RTA_OK(rta, keylen)) + return -EINVAL; + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + return -EINVAL; + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. + */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) + return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); + + param = RTA_DATA(rta); + keys->enckeylen = be32_to_cpu(param->enckeylen); + + key += rta->rta_len; + keylen -= rta->rta_len; + + if (keylen < keys->enckeylen) + return -EINVAL; + + keys->authkeylen = keylen - keys->enckeylen; + keys->authkey = key; + keys->enckey = key + keys->authkeylen; + return 0; +} +EXPORT_SYMBOL(crypto_krb5enc_extractkeys); + +static int krb5enc_setkey(struct crypto_aead *krb5enc, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct crypto_skcipher *enc = ctx->enc; + struct crypto_ahash *auth = ctx->auth; + unsigned int flags = crypto_aead_get_flags(krb5enc); + int err = -EINVAL; + + if (crypto_krb5enc_extractkeys(&keys, key, keylen) != 0) + goto out; + + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(auth, flags & CRYPTO_TFM_REQ_MASK); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); + if (err) + goto out; + + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(enc, flags & CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); +out: + memzero_explicit(&keys, sizeof(keys)); + return err; +} + +static void krb5enc_encrypt_done(void *data, int err) +{ + struct aead_request *req = data; + + krb5enc_request_complete(req, err); +} + +/* + * Start the encryption of the plaintext. We skip over the associated data as + * that only gets included in the hash. + */ +static int krb5enc_dispatch_encrypt(struct aead_request *req, + unsigned int flags) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct crypto_skcipher *enc = ctx->enc; + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); + if (req->src == req->dst) + dst = src; + else + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); + + skcipher_request_set_tfm(skreq, enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + krb5enc_encrypt_done, req); + skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv); + + return crypto_skcipher_encrypt(skreq); +} + +/* + * Insert the hash into the checksum field in the destination buffer directly + * after the encrypted region. 
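+ *
+ * The resulting layout of req->dst is
+ *
+ *	[ assoclen bytes AAD | cryptlen bytes ciphertext | authsize tag ]
+ *
+ * which is the layout krb5enc_verify_hash() expects on the decrypt side.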
+ */ +static void krb5enc_insert_checksum(struct aead_request *req, u8 *hash) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + + scatterwalk_map_and_copy(hash, req->dst, + req->assoclen + req->cryptlen, + crypto_aead_authsize(krb5enc), 1); +} + +/* + * Upon completion of an asynchronous digest, transfer the hash to the checksum + * field. + */ +static void krb5enc_encrypt_ahash_done(void *data, int err) +{ + struct aead_request *req = data; + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + + if (err) + return krb5enc_request_complete(req, err); + + krb5enc_insert_checksum(req, ahreq->result); + + err = krb5enc_dispatch_encrypt(req, 0); + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + +/* + * Start the digest of the plaintext for encryption. In theory, this could be + * run in parallel with the encryption, provided the src and dst buffers don't + * overlap. + */ +static int krb5enc_dispatch_encrypt_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct crypto_ahash *auth = ctx->auth; + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + u8 *hash = areq_ctx->tail; + int err; + + ahash_request_set_callback(ahreq, aead_request_flags(req), + krb5enc_encrypt_ahash_done, req); + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen); + + err = crypto_ahash_digest(ahreq); + if (err) + return err; + + krb5enc_insert_checksum(req, hash); + return 0; +} + +/* + * Process an encryption operation. We can perform the cipher and the hash in + * parallel, provided the src and dst buffers are separate. + */ +static int krb5enc_encrypt(struct aead_request *req) +{ + int err; + + err = krb5enc_dispatch_encrypt_hash(req); + if (err < 0) + return err; + + return krb5enc_dispatch_encrypt(req, aead_request_flags(req)); +} + +static int krb5enc_verify_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + unsigned int authsize = crypto_aead_authsize(krb5enc); + u8 *calc_hash = areq_ctx->tail; + u8 *msg_hash = areq_ctx->tail + authsize; + + scatterwalk_map_and_copy(msg_hash, req->src, ahreq->nbytes, authsize, 0); + + if (crypto_memneq(msg_hash, calc_hash, authsize)) + return -EBADMSG; + return 0; +} + +static void krb5enc_decrypt_hash_done(void *data, int err) +{ + struct aead_request *req = data; + + if (err) + return krb5enc_request_complete(req, err); + + err = krb5enc_verify_hash(req); + krb5enc_request_complete(req, err); +} + +/* + * Dispatch the hashing of the plaintext after we've done the decryption. 
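+ *
+ * Unlike the encrypt path, the digest here is computed over the
+ * decrypted output in req->dst (minus the authsize tag) and then
+ * compared by krb5enc_verify_hash() against the tag still present in
+ * req->src, rather than being inserted into the buffer.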
+ */ +static int krb5enc_dispatch_decrypt_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + struct crypto_ahash *auth = ctx->auth; + unsigned int authsize = crypto_aead_authsize(krb5enc); + u8 *hash = areq_ctx->tail; + int err; + + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, req->dst, hash, + req->assoclen + req->cryptlen - authsize); + ahash_request_set_callback(ahreq, aead_request_flags(req), + krb5enc_decrypt_hash_done, req); + + err = crypto_ahash_digest(ahreq); + if (err < 0) + return err; + + return krb5enc_verify_hash(req); +} + +/* + * Dispatch the decryption of the ciphertext. + */ +static int krb5enc_dispatch_decrypt(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); + unsigned int authsize = crypto_aead_authsize(krb5enc); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); + dst = src; + + if (req->src != req->dst) + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); + + skcipher_request_set_tfm(skreq, ctx->enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + req->base.complete, req->base.data); + skcipher_request_set_crypt(skreq, src, dst, + req->cryptlen - authsize, req->iv); + + return crypto_skcipher_decrypt(skreq); +} + +static int krb5enc_decrypt(struct aead_request *req) +{ + int err; + + err = krb5enc_dispatch_decrypt(req); + if (err < 0) + return err; + + return krb5enc_dispatch_decrypt_hash(req); +} + +static int krb5enc_init_tfm(struct crypto_aead *tfm) +{ + struct aead_instance *inst = aead_alg_instance(tfm); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + int err; + + auth = crypto_spawn_ahash(&ictx->auth); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + enc = crypto_spawn_skcipher(&ictx->enc); + err = PTR_ERR(enc); + if (IS_ERR(enc)) + goto err_free_ahash; + + ctx->auth = auth; + ctx->enc = enc; + + crypto_aead_set_reqsize( + tfm, + sizeof(struct krb5enc_request_ctx) + + ictx->reqoff + /* Space for two checksums */ + umax(sizeof(struct ahash_request) + crypto_ahash_reqsize(auth), + sizeof(struct skcipher_request) + crypto_skcipher_reqsize(enc))); + + return 0; + +err_free_ahash: + crypto_free_ahash(auth); + return err; +} + +static void krb5enc_exit_tfm(struct crypto_aead *tfm) +{ + struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_ahash(ctx->auth); + crypto_free_skcipher(ctx->enc); +} + +static void krb5enc_free(struct aead_instance *inst) +{ + struct krb5enc_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_skcipher(&ctx->enc); + crypto_drop_ahash(&ctx->auth); + kfree(inst); +} + +/* + * Create an instance of a template for a specific hash and cipher pair. 
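+ *
+ * The Kerberos library instantiates this template with names of the
+ * form "krb5enc(<hash>,<cipher>)"; for example, for the RFC 6803
+ * Camellia profile one would expect something like:
+ *
+ *	tfm = crypto_alloc_aead("krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ *				0, 0);
+ *
+ * with both keys then handed over in a single ->setkey() blob in the
+ * format decoded by crypto_krb5enc_extractkeys().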
+ */ +static int krb5enc_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct krb5enc_instance_ctx *ictx; + struct skcipher_alg_common *enc; + struct hash_alg_common *auth; + struct aead_instance *inst; + struct crypto_alg *auth_base; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) { + pr_err("attr_type failed\n"); + return err; + } + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + ictx = aead_instance_ctx(inst); + + err = crypto_grab_ahash(&ictx->auth, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) { + pr_err("grab ahash failed\n"); + goto err_free_inst; + } + auth = crypto_spawn_ahash_alg(&ictx->auth); + auth_base = &auth->base; + + err = crypto_grab_skcipher(&ictx->enc, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[2]), 0, mask); + if (err) { + pr_err("grab skcipher failed\n"); + goto err_free_inst; + } + enc = crypto_spawn_skcipher_alg_common(&ictx->enc); + + ictx->reqoff = 2 * auth->digestsize; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "krb5enc(%s,%s)", auth_base->cra_name, + enc->base.cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "krb5enc(%s,%s)", auth_base->cra_driver_name, + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_priority = enc->base.cra_priority * 10 + + auth_base->cra_priority; + inst->alg.base.cra_blocksize = enc->base.cra_blocksize; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; + inst->alg.base.cra_ctxsize = sizeof(struct krb5enc_ctx); + + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; + inst->alg.maxauthsize = auth->digestsize; + + inst->alg.init = krb5enc_init_tfm; + inst->alg.exit = krb5enc_exit_tfm; + + inst->alg.setkey = krb5enc_setkey; + inst->alg.encrypt = krb5enc_encrypt; + inst->alg.decrypt = krb5enc_decrypt; + + inst->free = krb5enc_free; + + err = aead_register_instance(tmpl, inst); + if (err) { + pr_err("ref failed\n"); + goto err_free_inst; + } + + return 0; + +err_free_inst: + krb5enc_free(inst); + return err; +} + +static struct crypto_template crypto_krb5enc_tmpl = { + .name = "krb5enc", + .create = krb5enc_create, + .module = THIS_MODULE, +}; + +static int __init crypto_krb5enc_module_init(void) +{ + return crypto_register_template(&crypto_krb5enc_tmpl); +} + +static void __exit crypto_krb5enc_module_exit(void) +{ + crypto_unregister_template(&crypto_krb5enc_tmpl); +} + +module_init(crypto_krb5enc_module_init); +module_exit(crypto_krb5enc_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple AEAD wrapper for Kerberos 5 RFC3961"); +MODULE_ALIAS_CRYPTO("krb5enc"); diff --git a/crypto/lrw.c b/crypto/lrw.c index bcf09fbc750a..dd403b800513 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) while (w.nbytes) { unsigned int avail = w.nbytes; - be128 *wsrc; + const be128 *wsrc; be128 *wdst; wsrc = w.src.virt.addr; @@ -205,9 +205,9 @@ static int lrw_xor_tweak_post(struct skcipher_request *req) return lrw_xor_tweak(req, true); } -static void lrw_crypt_done(struct crypto_async_request *areq, int err) +static void lrw_crypt_done(void *data, int err) { - struct skcipher_request *req = areq->data; + struct skcipher_request *req = data; if (!err) { struct lrw_request_ctx *rctx = skcipher_request_ctx(req); 
@@ -299,8 +299,8 @@ static void lrw_free_instance(struct skcipher_instance *inst) static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; struct skcipher_instance *inst; - struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; @@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -336,13 +336,13 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_skcipher_spawn_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) goto err_free_inst; - if (crypto_skcipher_alg_ivsize(alg)) + if (alg->ivsize) goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", @@ -356,11 +356,11 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { - unsigned len; + if (!memcmp(cipher_name, "ecb(", 4)) { + int len; - len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); - if (len < 2 || len >= sizeof(ecb_name)) + len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); + if (len < 2) goto err_free_inst; if (ecb_name[len - 1] != ')') @@ -382,10 +382,8 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) (__alignof__(be128) - 1); inst->alg.ivsize = LRW_BLOCK_SIZE; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + - LRW_BLOCK_SIZE; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + - LRW_BLOCK_SIZE; + inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE; + inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); @@ -422,9 +420,10 @@ static void __exit lrw_module_exit(void) crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(lrw_module_init); +module_init(lrw_module_init); module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); MODULE_ALIAS_CRYPTO("lrw"); +MODULE_SOFTDEP("pre: ecb"); diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c new file mode 100644 index 000000000000..c2e2c38b5aa8 --- /dev/null +++ b/crypto/lskcipher.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linear symmetric key cipher operations. + * + * Generic encrypt/decrypt wrapper for ciphers. 
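+ *
+ * An lskcipher operates on virtually-linear buffers rather than
+ * scatterlists, which keeps the fast path free of walk state.  Minimal
+ * direct use might look like this (sketch, assuming an "ecb(aes)"
+ * implementation, a 16-byte key and a whole number of blocks in len):
+ *
+ *	struct crypto_lskcipher *tfm;
+ *	int err;
+ *
+ *	tfm = crypto_alloc_lskcipher("ecb(aes)", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	err = crypto_lskcipher_setkey(tfm, key, 16);
+ *	if (!err)
+ *		err = crypto_lskcipher_encrypt(tfm, src, dst, len, NULL);
+ *	crypto_free_lskcipher(tfm);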
+ * + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#include <linux/cryptouser.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <net/netlink.h> +#include "skcipher.h" + +static inline struct crypto_lskcipher *__crypto_lskcipher_cast( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_lskcipher, base); +} + +static inline struct lskcipher_alg *__crypto_lskcipher_alg( + struct crypto_alg *alg) +{ + return container_of(alg, struct lskcipher_alg, co.base); +} + +static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm); + u8 *buffer, *alignbuffer; + unsigned long absize; + int ret; + + absize = keylen + alignmask; + buffer = kmalloc(absize, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + memcpy(alignbuffer, key, keylen); + ret = cipher->setkey(tfm, alignbuffer, keylen); + kfree_sensitive(buffer); + return ret; +} + +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm); + + if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize) + return -EINVAL; + + if ((unsigned long)key & alignmask) + return lskcipher_setkey_unaligned(tfm, key, keylen); + else + return cipher->setkey(tfm, key, keylen); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey); + +static int crypto_lskcipher_crypt_unaligned( + struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len, + u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, u32 flags)) +{ + unsigned statesize = crypto_lskcipher_statesize(tfm); + unsigned ivsize = crypto_lskcipher_ivsize(tfm); + unsigned bs = crypto_lskcipher_blocksize(tfm); + unsigned cs = crypto_lskcipher_chunksize(tfm); + int err; + u8 *tiv; + u8 *p; + + BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE || + MAX_CIPHER_ALIGNMASK >= PAGE_SIZE); + + tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (!tiv) + return -ENOMEM; + + memcpy(tiv, iv, ivsize + statesize); + + p = kmalloc(PAGE_SIZE, GFP_ATOMIC); + err = -ENOMEM; + if (!p) + goto out; + + while (len >= bs) { + unsigned chunk = min((unsigned)PAGE_SIZE, len); + int err; + + if (chunk > cs) + chunk &= ~(cs - 1); + + memcpy(p, src, chunk); + err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL); + if (err) + goto out; + + memcpy(dst, p, chunk); + src += chunk; + dst += chunk; + len -= chunk; + } + + err = len ? 
-EINVAL : 0; + +out: + memcpy(iv, tiv, ivsize + statesize); + kfree_sensitive(p); + kfree_sensitive(tiv); + return err; +} + +static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + u32 flags)) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + + if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & + alignmask) + return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, + crypt); + + return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); +} + +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt); + +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); + +static int crypto_lskcipher_crypt_sg(struct skcipher_request *req, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *ivs, + u32 flags)) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + u8 *ivs = skcipher_request_ctx(req); + struct crypto_lskcipher *tfm = *ctx; + struct skcipher_walk walk; + unsigned ivsize; + u32 flags; + int err; + + ivsize = crypto_lskcipher_ivsize(tfm); + ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1); + memcpy(ivs, req->iv, ivsize); + + flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT) + flags |= CRYPTO_LSKCIPHER_FLAG_CONT; + + if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL)) + flags |= CRYPTO_LSKCIPHER_FLAG_FINAL; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, ivs, + flags & ~(walk.nbytes == walk.total ? 
+ 0 : CRYPTO_LSKCIPHER_FLAG_FINAL)); + err = skcipher_walk_done(&walk, err); + flags |= CRYPTO_LSKCIPHER_FLAG_CONT; + } + + memcpy(req->iv, ivs, ivsize); + + return err; +} + +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->encrypt); +} + +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->decrypt); +} + +static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + alg->exit(skcipher); +} + +static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + if (alg->exit) + skcipher->base.exit = crypto_lskcipher_exit_tfm; + + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_lskcipher_free_instance(struct crypto_instance *inst) +{ + struct lskcipher_instance *skcipher = + container_of(inst, struct lskcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void __maybe_unused crypto_lskcipher_show( + struct seq_file *m, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + + seq_printf(m, "type : lskcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize); + seq_printf(m, "statesize : %u\n", skcipher->co.statesize); +} + +static int __maybe_unused crypto_lskcipher_report( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_report_blkcipher rblkcipher; + + memset(&rblkcipher, 0, sizeof(rblkcipher)); + + strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type)); + strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->co.min_keysize; + rblkcipher.max_keysize = skcipher->co.max_keysize; + rblkcipher.ivsize = skcipher->co.ivsize; + + return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(rblkcipher), &rblkcipher); +} + +static const struct crypto_type crypto_lskcipher_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_lskcipher_init_tfm, + .free = crypto_lskcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_lskcipher_show, +#endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) + .report = crypto_lskcipher_report, +#endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_LSKCIPHER, + .tfmsize = offsetof(struct crypto_lskcipher, base), + .algsize = offsetof(struct lskcipher_alg, co.base), +}; + +static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +int 
crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_lskcipher *skcipher; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type); + if (IS_ERR(skcipher)) { + crypto_mod_put(calg); + return PTR_ERR(skcipher); + } + + *ctx = skcipher; + tfm->exit = crypto_lskcipher_exit_tfm_sg; + + return 0; +} + +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_lskcipher_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_lskcipher); + +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher); + +static int lskcipher_prepare_alg(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->co.chunksize & (alg->co.chunksize - 1)) + return -EINVAL; + + base->cra_type = &crypto_lskcipher_type; + base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER; + + return 0; +} + +int crypto_register_lskcipher(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = lskcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_lskcipher); + +void crypto_unregister_lskcipher(struct lskcipher_alg *alg) +{ + crypto_unregister_alg(&alg->co.base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher); + +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_lskcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_lskciphers); + +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers); + +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst) +{ + int err; + + if (WARN_ON(!inst->free)) + return -EINVAL; + + err = lskcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(lskcipher_register_instance); + +static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm); + + crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_lskcipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher_spawn *spawn; + struct crypto_lskcipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_lskcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + 
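+/*
+ * For the simple templates the tfm context is just the single
+ * struct crypto_lskcipher pointer for the spawned cipher, set up in
+ * lskcipher_init_tfm_simple() above and released again below.
+ */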
+static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +static void lskcipher_free_instance_simple(struct lskcipher_instance *inst) +{ + crypto_drop_lskcipher(lskcipher_instance_ctx(inst)); + kfree(inst); +} + +/** + * lskcipher_alloc_instance_simple - allocate instance of simple block cipher + * + * Allocate an lskcipher_instance for a simple block cipher mode of operation, + * e.g. cbc or ecb. The instance context will have just a single crypto_spawn, + * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize, + * alignmask, and priority are set from the underlying cipher but can be + * overridden if needed. The tfm context defaults to + * struct crypto_lskcipher *, and default ->setkey(), ->init(), and + * ->exit() methods are installed. + * + * @tmpl: the template being instantiated + * @tb: the template parameters + * + * Return: a pointer to the new instance, or an ERR_PTR(). The caller still + * needs to register the instance. + */ +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + struct lskcipher_instance *inst; + struct crypto_lskcipher_spawn *spawn; + char ecb_name[CRYPTO_MAX_ALG_NAME]; + struct lskcipher_alg *cipher_alg; + const char *cipher_name; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return ERR_CAST(cipher_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = lskcipher_instance_ctx(inst); + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + cipher_name, 0, mask); + + ecb_name[0] = 0; + if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) { + err = -ENAMETOOLONG; + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + ecb_name, 0, mask); + } + + if (err) + goto err_free_inst; + + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + &cipher_alg->co.base); + if (err) + goto err_free_inst; + + if (ecb_name[0]) { + int len; + + err = -EINVAL; + len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4], + sizeof(ecb_name)); + if (len < 2) + goto err_free_inst; + + if (ecb_name[len - 1] != ')') + goto err_free_inst; + + ecb_name[len - 1] = 0; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, ecb_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (strcmp(ecb_name, cipher_name) && + snprintf(inst->alg.co.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, cipher_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + } else { + /* Don't allow nesting. 
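+		 * If the cipher was found under its direct name but is
+		 * itself a template instance (CRYPTO_ALG_INSTANCE), refuse
+		 * it with -ELOOP rather than silently stacking modes.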
*/ + err = -ELOOP; + if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) + goto err_free_inst; + } + + err = -EINVAL; + if (cipher_alg->co.ivsize) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority; + inst->alg.co.min_keysize = cipher_alg->co.min_keysize; + inst->alg.co.max_keysize = cipher_alg->co.max_keysize; + inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize; + inst->alg.co.statesize = cipher_alg->co.statesize; + + /* Use struct crypto_lskcipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *); + inst->alg.setkey = lskcipher_setkey_simple; + inst->alg.init = lskcipher_init_tfm_simple; + inst->alg.exit = lskcipher_exit_tfm_simple; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple(inst); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple); diff --git a/crypto/lz4.c b/crypto/lz4.c index 0606f8862e78..57b713516aef 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c @@ -12,11 +12,7 @@ #include <linux/lz4.h> #include <crypto/internal/scompress.h> -struct lz4_ctx { - void *lz4_comp_mem; -}; - -static void *lz4_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4_alloc_ctx(void) { void *ctx; @@ -27,29 +23,11 @@ static void *lz4_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lz4_init(struct crypto_tfm *tfm) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); - if (IS_ERR(ctx->lz4_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4_free_ctx(void *ctx) { vfree(ctx); } -static void lz4_exit(struct crypto_tfm *tfm) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - lz4_free_ctx(NULL, ctx->lz4_comp_mem); -} - static int __lz4_compress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -70,14 +48,6 @@ static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src, return __lz4_compress_crypto(src, slen, dst, dlen, ctx); } -static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem); -} - static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -97,29 +67,11 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); } -static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); -} - -static struct crypto_alg alg_lz4 = { - .cra_name = "lz4", - .cra_driver_name = "lz4-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lz4_ctx), - .cra_module = THIS_MODULE, - .cra_init = lz4_init, - .cra_exit = lz4_exit, - .cra_u = { .compress = { - .coa_compress = lz4_compress_crypto, - .coa_decompress = lz4_decompress_crypto } } -}; - static struct scomp_alg scomp = { - .alloc_ctx = lz4_alloc_ctx, - .free_ctx = lz4_free_ctx, + .streams = { + .alloc_ctx = lz4_alloc_ctx, + .free_ctx = lz4_free_ctx, + }, 
.compress = lz4_scompress, .decompress = lz4_sdecompress, .base = { @@ -131,28 +83,15 @@ static struct scomp_alg scomp = { static int __init lz4_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg_lz4); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg_lz4); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lz4_mod_fini(void) { - crypto_unregister_alg(&alg_lz4); crypto_unregister_scomp(&scomp); } -subsys_initcall(lz4_mod_init); +module_init(lz4_mod_init); module_exit(lz4_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index d7cc94aa2fcf..bb84f8a68cb5 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c @@ -4,18 +4,13 @@ * * Copyright (c) 2013 Chanho Min <chanho.min@lge.com> */ +#include <crypto/internal/scompress.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/lz4.h> -#include <crypto/internal/scompress.h> - -struct lz4hc_ctx { - void *lz4hc_comp_mem; -}; -static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4hc_alloc_ctx(void) { void *ctx; @@ -26,29 +21,11 @@ static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lz4hc_init(struct crypto_tfm *tfm) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); - if (IS_ERR(ctx->lz4hc_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4hc_free_ctx(void *ctx) { vfree(ctx); } -static void lz4hc_exit(struct crypto_tfm *tfm) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); -} - static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -69,16 +46,6 @@ static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src, return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx); } -static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lz4hc_compress_crypto(src, slen, dst, dlen, - ctx->lz4hc_comp_mem); -} - static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -98,29 +65,11 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); } -static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); -} - -static struct crypto_alg alg_lz4hc = { - .cra_name = "lz4hc", - .cra_driver_name = "lz4hc-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lz4hc_ctx), - .cra_module = THIS_MODULE, - .cra_init = lz4hc_init, - .cra_exit = lz4hc_exit, - .cra_u = { .compress = { - .coa_compress = lz4hc_compress_crypto, - .coa_decompress = lz4hc_decompress_crypto } } -}; - static struct scomp_alg scomp = { - .alloc_ctx = lz4hc_alloc_ctx, - .free_ctx = lz4hc_free_ctx, + .streams = { + .alloc_ctx = lz4hc_alloc_ctx, + .free_ctx = lz4hc_free_ctx, + }, .compress = lz4hc_scompress, .decompress = lz4hc_sdecompress, .base = { @@ -132,28 +81,15 @@ static struct scomp_alg scomp = { static int __init lz4hc_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg_lz4hc); - if (ret) - 
return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg_lz4hc); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lz4hc_mod_fini(void) { - crypto_unregister_alg(&alg_lz4hc); crypto_unregister_scomp(&scomp); } -subsys_initcall(lz4hc_mod_init); +module_init(lz4hc_mod_init); module_exit(lz4hc_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index 0631d975bfac..794e7ec49536 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -3,19 +3,13 @@ * Cryptographic API. */ +#include <crypto/internal/scompress.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> #include <linux/lzo.h> -#include <crypto/internal/scompress.h> - -struct lzorle_ctx { - void *lzorle_comp_mem; -}; +#include <linux/module.h> +#include <linux/slab.h> -static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) +static void *lzorle_alloc_ctx(void) { void *ctx; @@ -26,36 +20,18 @@ static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lzorle_init(struct crypto_tfm *tfm) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL); - if (IS_ERR(ctx->lzorle_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzorle_free_ctx(void *ctx) { kvfree(ctx); } -static void lzorle_exit(struct crypto_tfm *tfm) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - lzorle_free_ctx(NULL, ctx->lzorle_comp_mem); -} - static int __lzorle_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,14 +40,6 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, return 0; } -static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem); -} - static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -94,12 +62,6 @@ static int __lzorle_decompress(const u8 *src, unsigned int slen, return 0; } -static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - return __lzorle_decompress(src, slen, dst, dlen); -} - static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -107,22 +69,11 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lzorle_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "lzo-rle", - .cra_driver_name = "lzo-rle-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lzorle_ctx), - .cra_module = THIS_MODULE, - .cra_init = lzorle_init, - .cra_exit = lzorle_exit, - .cra_u = { .compress = { - .coa_compress = lzorle_compress, - .coa_decompress = lzorle_decompress } } -}; - static struct scomp_alg scomp = { - .alloc_ctx = lzorle_alloc_ctx, - .free_ctx = lzorle_free_ctx, + .streams = { + .alloc_ctx = lzorle_alloc_ctx, + .free_ctx = lzorle_free_ctx, + }, .compress = 
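Besides dropping the legacy algorithm, the lzo-rle hunk above switches to the bounds-checked lzorle1x_1_compress_safe() and keeps a size_t temporary, since scomp reports lengths as unsigned int while the LZO library takes size_t. A sketch of that calling convention, assuming the _safe signature used in the hunk with *dst_len as the output bound on entry:

#include <linux/lzo.h>
#include <linux/types.h>

static int sketch_lzo_compress(const u8 *src, unsigned int slen,
                               u8 *dst, unsigned int *dlen, void *wmem)
{
        size_t tmp_len = *dlen;         /* size_t(ulong) <-> uint on 64 bit */
        int err;

        err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, wmem);
        if (err != LZO_E_OK)
                return -EINVAL;

        *dlen = tmp_len;                /* _safe stays within the dst bound */
        return 0;
}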
lzorle_scompress, .decompress = lzorle_sdecompress, .base = { @@ -134,28 +85,15 @@ static struct scomp_alg scomp = { static int __init lzorle_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lzorle_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } -subsys_initcall(lzorle_mod_init); +module_init(lzorle_mod_init); module_exit(lzorle_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lzo.c b/crypto/lzo.c index ebda132dd22b..d43242b24b4e 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -3,19 +3,13 @@ * Cryptographic API. */ +#include <crypto/internal/scompress.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> #include <linux/lzo.h> -#include <crypto/internal/scompress.h> - -struct lzo_ctx { - void *lzo_comp_mem; -}; +#include <linux/module.h> +#include <linux/slab.h> -static void *lzo_alloc_ctx(struct crypto_scomp *tfm) +static void *lzo_alloc_ctx(void) { void *ctx; @@ -26,36 +20,18 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lzo_init(struct crypto_tfm *tfm) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); - if (IS_ERR(ctx->lzo_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzo_free_ctx(void *ctx) { kvfree(ctx); } -static void lzo_exit(struct crypto_tfm *tfm) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - lzo_free_ctx(NULL, ctx->lzo_comp_mem); -} - static int __lzo_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,14 +40,6 @@ static int __lzo_compress(const u8 *src, unsigned int slen, return 0; } -static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem); -} - static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -94,12 +62,6 @@ static int __lzo_decompress(const u8 *src, unsigned int slen, return 0; } -static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - return __lzo_decompress(src, slen, dst, dlen); -} - static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -107,22 +69,11 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lzo_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "lzo", - .cra_driver_name = "lzo-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lzo_ctx), - .cra_module = THIS_MODULE, - .cra_init = lzo_init, - .cra_exit = lzo_exit, - .cra_u = { .compress = { - .coa_compress = lzo_compress, - .coa_decompress = lzo_decompress } } -}; - static struct scomp_alg scomp = { - .alloc_ctx = lzo_alloc_ctx, - .free_ctx = lzo_free_ctx, + .streams = { + .alloc_ctx = 
lzo_alloc_ctx, + .free_ctx = lzo_free_ctx, + }, .compress = lzo_scompress, .decompress = lzo_sdecompress, .base = { @@ -134,28 +85,15 @@ static struct scomp_alg scomp = { static int __init lzo_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lzo_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } -subsys_initcall(lzo_mod_init); +module_init(lzo_mod_init); module_exit(lzo_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/md4.c b/crypto/md4.c index 2e7f2f319f95..55bf47e23c13 100644 --- a/crypto/md4.c +++ b/crypto/md4.c @@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(md4_mod_init); +module_init(md4_mod_init); module_exit(md4_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/md5.c b/crypto/md5.c index 72c0c46fb5ee..c167d203c710 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -1,27 +1,62 @@ -/* - * Cryptographic API. - * - * MD5 Message Digest Algorithm (RFC1321). - * - * Derived from cryptoapi implementation, originally based on the - * public domain implementation written by Colin Plumb in 1993. - * - * Copyright (c) Cryptoapi developers. - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API support for MD5 and HMAC-MD5 * + * Copyright 2025 Google LLC */ #include <crypto/internal/hash.h> #include <crypto/md5.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/string.h> -#include <linux/types.h> -#include <asm/byteorder.h> + +/* + * Export and import functions. crypto_shash wants a particular format that + * matches that used by some legacy drivers. It currently is the same as the + * library MD5 context, except the value in bytecount must be block-aligned and + * the remainder must be stored in an extra u8 appended to the struct. 
+ */ + +#define MD5_SHASH_STATE_SIZE (sizeof(struct md5_ctx) + 1) +static_assert(sizeof(struct md5_ctx) == sizeof(struct md5_state)); +static_assert(offsetof(struct md5_ctx, state) == offsetof(struct md5_state, hash)); +static_assert(offsetof(struct md5_ctx, bytecount) == offsetof(struct md5_state, byte_count)); +static_assert(offsetof(struct md5_ctx, buf) == offsetof(struct md5_state, block)); + +static int __crypto_md5_export(const struct md5_ctx *ctx0, void *out) +{ + struct md5_ctx ctx = *ctx0; + unsigned int partial; + u8 *p = out; + + partial = ctx.bytecount % MD5_BLOCK_SIZE; + ctx.bytecount -= partial; + memcpy(p, &ctx, sizeof(ctx)); + p += sizeof(ctx); + *p = partial; + return 0; +} + +static int __crypto_md5_import(struct md5_ctx *ctx, const void *in) +{ + const u8 *p = in; + + memcpy(ctx, p, sizeof(*ctx)); + p += sizeof(*ctx); + ctx->bytecount += *p; + return 0; +} + +static int __crypto_md5_export_core(const struct md5_ctx *ctx, void *out) +{ + memcpy(out, ctx, offsetof(struct md5_ctx, buf)); + return 0; +} + +static int __crypto_md5_import_core(struct md5_ctx *ctx, const void *in) +{ + memcpy(ctx, in, offsetof(struct md5_ctx, buf)); + return 0; +} const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, @@ -29,222 +64,173 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { }; EXPORT_SYMBOL_GPL(md5_zero_message_hash); -#define F1(x, y, z) (z ^ (x & (y ^ z))) -#define F2(x, y, z) F1(z, x, y) -#define F3(x, y, z) (x ^ y ^ z) -#define F4(x, y, z) (y ^ (x | ~z)) - -#define MD5STEP(f, w, x, y, z, in, s) \ - (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) - -static void md5_transform(__u32 *hash, __u32 const *in) -{ - u32 a, b, c, d; - - a = hash[0]; - b = hash[1]; - c = hash[2]; - d = hash[3]; - - MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); - MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); - MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); - MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); - MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); - MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); - MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); - MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); - MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); - MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); - MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); - MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); - MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); - MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); - MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); - MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); - - MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); - MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); - MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); - MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); - MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); - MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); - MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); - MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); - MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); - MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); - MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); - MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); - MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); - MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); - MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); - MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); - - MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); - MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); - MD5STEP(F3, c, d, a, b, in[11] + 
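A worked example of the export layout just described may help: after hashing 100 bytes, bytecount is 100; the export stores the block-aligned 64 in the struct copy and the remainder, 36, in the trailing byte, which the import adds back. The arithmetic, assuming MD5_BLOCK_SIZE is 64:

#include <assert.h>

#define MD5_BLOCK_SIZE 64       /* assumed, per <crypto/md5.h> */

int main(void)
{
        unsigned long long bytecount = 100;
        unsigned int partial = bytecount % MD5_BLOCK_SIZE;      /* 36 */

        bytecount -= partial;                   /* 64, block-aligned */
        assert(bytecount == 64 && partial == 36);
        assert(bytecount + partial == 100);     /* import restores it */
        return 0;
}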
0x6d9d6122, 16); - MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); - MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); - MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); - MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); - MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); - MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); - MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); - MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); - MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); - MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); - MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); - MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); - MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); - - MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); - MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); - MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); - MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); - MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); - MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); - MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); - MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); - MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); - MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); - MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); - MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); - MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); - MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); - MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); - MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); - - hash[0] += a; - hash[1] += b; - hash[2] += c; - hash[3] += d; -} - -static inline void md5_transform_helper(struct md5_state *ctx) -{ - le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); - md5_transform(ctx->hash, ctx->block); -} - -static int md5_init(struct shash_desc *desc) -{ - struct md5_state *mctx = shash_desc_ctx(desc); - - mctx->hash[0] = MD5_H0; - mctx->hash[1] = MD5_H1; - mctx->hash[2] = MD5_H2; - mctx->hash[3] = MD5_H3; - mctx->byte_count = 0; +#define MD5_CTX(desc) ((struct md5_ctx *)shash_desc_ctx(desc)) +static int crypto_md5_init(struct shash_desc *desc) +{ + md5_init(MD5_CTX(desc)); return 0; } -static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) +static int crypto_md5_update(struct shash_desc *desc, + const u8 *data, unsigned int len) { - struct md5_state *mctx = shash_desc_ctx(desc); - const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + md5_update(MD5_CTX(desc), data, len); + return 0; +} - mctx->byte_count += len; +static int crypto_md5_final(struct shash_desc *desc, u8 *out) +{ + md5_final(MD5_CTX(desc), out); + return 0; +} - if (avail > len) { - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, len); - return 0; - } +static int crypto_md5_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + md5(data, len, out); + return 0; +} - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, avail); +static int crypto_md5_export(struct shash_desc *desc, void *out) +{ + return __crypto_md5_export(MD5_CTX(desc), out); +} - md5_transform_helper(mctx); - data += avail; - len -= avail; +static int crypto_md5_import(struct shash_desc *desc, const void *in) +{ + return __crypto_md5_import(MD5_CTX(desc), in); +} - while (len >= sizeof(mctx->block)) { - memcpy(mctx->block, data, sizeof(mctx->block)); - md5_transform_helper(mctx); - data += sizeof(mctx->block); - len -= sizeof(mctx->block); - } +static int crypto_md5_export_core(struct shash_desc *desc, void *out) +{ + return 
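After this rewrite the shash ops above are one-line wrappers over the MD5 library, so kernel code holding a linear buffer can call the library directly. A sketch using the md5_init/md5_update/md5_final interface these wrappers assume:

#include <crypto/md5.h>
#include <linux/types.h>

static void sketch_md5_digest(const u8 *data, size_t len,
                              u8 digest[MD5_DIGEST_SIZE])
{
        struct md5_ctx ctx;

        md5_init(&ctx);
        md5_update(&ctx, data, len);    /* may be called repeatedly */
        md5_final(&ctx, digest);        /* produce the 16-byte digest */
}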
__crypto_md5_export_core(MD5_CTX(desc), out); +} - memcpy(mctx->block, data, len); +static int crypto_md5_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_md5_import_core(MD5_CTX(desc), in); +} + +#define HMAC_MD5_KEY(tfm) ((struct hmac_md5_key *)crypto_shash_ctx(tfm)) +#define HMAC_MD5_CTX(desc) ((struct hmac_md5_ctx *)shash_desc_ctx(desc)) +static int crypto_hmac_md5_setkey(struct crypto_shash *tfm, + const u8 *raw_key, unsigned int keylen) +{ + hmac_md5_preparekey(HMAC_MD5_KEY(tfm), raw_key, keylen); return 0; } -static int md5_final(struct shash_desc *desc, u8 *out) +static int crypto_hmac_md5_init(struct shash_desc *desc) { - struct md5_state *mctx = shash_desc_ctx(desc); - const unsigned int offset = mctx->byte_count & 0x3f; - char *p = (char *)mctx->block + offset; - int padding = 56 - (offset + 1); + hmac_md5_init(HMAC_MD5_CTX(desc), HMAC_MD5_KEY(desc->tfm)); + return 0; +} - *p++ = 0x80; - if (padding < 0) { - memset(p, 0x00, padding + sizeof (u64)); - md5_transform_helper(mctx); - p = (char *)mctx->block; - padding = 56; - } +static int crypto_hmac_md5_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + hmac_md5_update(HMAC_MD5_CTX(desc), data, len); + return 0; +} - memset(p, 0, padding); - mctx->block[14] = mctx->byte_count << 3; - mctx->block[15] = mctx->byte_count >> 29; - le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - - sizeof(u64)) / sizeof(u32)); - md5_transform(mctx->hash, mctx->block); - cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); - memcpy(out, mctx->hash, sizeof(mctx->hash)); - memset(mctx, 0, sizeof(*mctx)); +static int crypto_hmac_md5_final(struct shash_desc *desc, u8 *out) +{ + hmac_md5_final(HMAC_MD5_CTX(desc), out); + return 0; +} +static int crypto_hmac_md5_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + hmac_md5(HMAC_MD5_KEY(desc->tfm), data, len, out); return 0; } -static int md5_export(struct shash_desc *desc, void *out) +static int crypto_hmac_md5_export(struct shash_desc *desc, void *out) { - struct md5_state *ctx = shash_desc_ctx(desc); + return __crypto_md5_export(&HMAC_MD5_CTX(desc)->hash_ctx, out); +} - memcpy(out, ctx, sizeof(*ctx)); - return 0; +static int crypto_hmac_md5_import(struct shash_desc *desc, const void *in) +{ + struct hmac_md5_ctx *ctx = HMAC_MD5_CTX(desc); + + ctx->ostate = HMAC_MD5_KEY(desc->tfm)->ostate; + return __crypto_md5_import(&ctx->hash_ctx, in); } -static int md5_import(struct shash_desc *desc, const void *in) +static int crypto_hmac_md5_export_core(struct shash_desc *desc, void *out) { - struct md5_state *ctx = shash_desc_ctx(desc); + return __crypto_md5_export_core(&HMAC_MD5_CTX(desc)->hash_ctx, out); +} - memcpy(ctx, in, sizeof(*ctx)); - return 0; +static int crypto_hmac_md5_import_core(struct shash_desc *desc, const void *in) +{ + struct hmac_md5_ctx *ctx = HMAC_MD5_CTX(desc); + + ctx->ostate = HMAC_MD5_KEY(desc->tfm)->ostate; + return __crypto_md5_import_core(&ctx->hash_ctx, in); } -static struct shash_alg alg = { - .digestsize = MD5_DIGEST_SIZE, - .init = md5_init, - .update = md5_update, - .final = md5_final, - .export = md5_export, - .import = md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), - .base = { - .cra_name = "md5", - .cra_driver_name = "md5-generic", - .cra_blocksize = MD5_HMAC_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } +static struct shash_alg algs[] = { + { + .base.cra_name = "md5", + .base.cra_driver_name = "md5-lib", + .base.cra_priority = 300, + 
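The HMAC wrappers above keep only a prepared key — the pre-hashed inner and outer states — in the tfm context, which is why the import paths must restore ostate from the key before resuming. A one-shot sketch with the same library calls that appear in these wrappers:

#include <crypto/md5.h>
#include <linux/types.h>

static void sketch_hmac_md5(const u8 *key, size_t key_len,
                            const u8 *msg, size_t msg_len,
                            u8 mac[MD5_DIGEST_SIZE])
{
        struct hmac_md5_key k;

        hmac_md5_preparekey(&k, key, key_len);  /* derive ipad/opad states once */
        hmac_md5(&k, msg, msg_len, mac);        /* then MAC any number of messages */
}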
.base.cra_blocksize = MD5_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = MD5_DIGEST_SIZE, + .init = crypto_md5_init, + .update = crypto_md5_update, + .final = crypto_md5_final, + .digest = crypto_md5_digest, + .export = crypto_md5_export, + .import = crypto_md5_import, + .export_core = crypto_md5_export_core, + .import_core = crypto_md5_import_core, + .descsize = sizeof(struct md5_ctx), + .statesize = MD5_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "hmac(md5)", + .base.cra_driver_name = "hmac-md5-lib", + .base.cra_priority = 300, + .base.cra_blocksize = MD5_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct hmac_md5_key), + .base.cra_module = THIS_MODULE, + .digestsize = MD5_DIGEST_SIZE, + .setkey = crypto_hmac_md5_setkey, + .init = crypto_hmac_md5_init, + .update = crypto_hmac_md5_update, + .final = crypto_hmac_md5_final, + .digest = crypto_hmac_md5_digest, + .export = crypto_hmac_md5_export, + .import = crypto_hmac_md5_import, + .export_core = crypto_hmac_md5_export_core, + .import_core = crypto_hmac_md5_import_core, + .descsize = sizeof(struct hmac_md5_ctx), + .statesize = MD5_SHASH_STATE_SIZE, + }, }; -static int __init md5_mod_init(void) +static int __init crypto_md5_mod_init(void) { - return crypto_register_shash(&alg); + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } +module_init(crypto_md5_mod_init); -static void __exit md5_mod_fini(void) +static void __exit crypto_md5_mod_exit(void) { - crypto_unregister_shash(&alg); + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } - -subsys_initcall(md5_mod_init); -module_exit(md5_mod_fini); +module_exit(crypto_md5_mod_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("MD5 Message Digest Algorithm"); +MODULE_DESCRIPTION("Crypto API support for MD5 and HMAC-MD5"); + MODULE_ALIAS_CRYPTO("md5"); +MODULE_ALIAS_CRYPTO("md5-lib"); +MODULE_ALIAS_CRYPTO("hmac(md5)"); +MODULE_ALIAS_CRYPTO("hmac-md5-lib"); diff --git a/crypto/memneq.c b/crypto/memneq.c deleted file mode 100644 index afed1bd16aee..000000000000 --- a/crypto/memneq.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Constant-time equality testing of memory regions. - * - * Authors: - * - * James Yonan <james@openvpn.net> - * Daniel Borkmann <dborkman@redhat.com> - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of OpenVPN Technologies nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <crypto/algapi.h> - -#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ - -/* Generic path for arbitrary size */ -static inline unsigned long -__crypto_memneq_generic(const void *a, const void *b, size_t size) -{ - unsigned long neq = 0; - -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) - while (size >= sizeof(unsigned long)) { - neq |= *(unsigned long *)a ^ *(unsigned long *)b; - OPTIMIZER_HIDE_VAR(neq); - a += sizeof(unsigned long); - b += sizeof(unsigned long); - size -= sizeof(unsigned long); - } -#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ - while (size > 0) { - neq |= *(unsigned char *)a ^ *(unsigned char *)b; - OPTIMIZER_HIDE_VAR(neq); - a += 1; - b += 1; - size -= 1; - } - return neq; -} - -/* Loop-free fast-path for frequently used 16-byte size */ -static inline unsigned long __crypto_memneq_16(const void *a, const void *b) -{ - unsigned long neq = 0; - -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (sizeof(unsigned long) == 8) { - neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); - OPTIMIZER_HIDE_VAR(neq); - } else if (sizeof(unsigned int) == 4) { - neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); - OPTIMIZER_HIDE_VAR(neq); - } else -#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ - { - neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); - OPTIMIZER_HIDE_VAR(neq); - neq |= 
*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); - OPTIMIZER_HIDE_VAR(neq); - neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); - OPTIMIZER_HIDE_VAR(neq); - } - - return neq; -} - -/* Compare two areas of memory without leaking timing information, - * and with special optimizations for common sizes. Users should - * not call this function directly, but should instead use - * crypto_memneq defined in crypto/algapi.h. - */ -noinline unsigned long __crypto_memneq(const void *a, const void *b, - size_t size) -{ - switch (size) { - case 16: - return __crypto_memneq_16(a, b); - default: - return __crypto_memneq_generic(a, b, size); - } -} -EXPORT_SYMBOL(__crypto_memneq); - -#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index f4c31049601c..69ad35f524d7 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -7,7 +7,7 @@ * Copyright (c) 2004 Jouni Malinen <j@w1.fi> */ #include <crypto/internal/hash.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> @@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void) } -subsys_initcall(michael_mic_init); +module_init(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c index 8a3006c3b51b..2b648615b5ec 100644 --- a/crypto/nhpoly1305.c +++ b/crypto/nhpoly1305.c @@ -30,7 +30,7 @@ * (https://cr.yp.to/mac/poly1305-20050329.pdf) */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/algapi.h> #include <crypto/internal/hash.h> #include <crypto/internal/poly1305.h> @@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void) crypto_unregister_shash(&nhpoly1305_alg); } -subsys_initcall(nhpoly1305_mod_init); +module_init(nhpoly1305_mod_init); module_exit(nhpoly1305_mod_exit); MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function"); diff --git a/crypto/ofb.c b/crypto/ofb.c deleted file mode 100644 index b630fdecceee..000000000000 --- a/crypto/ofb.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * OFB: Output FeedBack mode - * - * Copyright (C) 2018 ARM Limited or its affiliates. - * All rights reserved. 
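The memneq helper deleted above is built on one idea worth keeping: accumulate XOR differences instead of comparing and branching, so the run time never depends on where two buffers first differ. The freestanding core of that technique:

#include <stddef.h>

/* The loop always runs to completion and has no data-dependent
 * branches, so timing does not reveal the first mismatching byte.
 * A nonzero return means "not equal". */
static unsigned long memneq_sketch(const void *a, const void *b, size_t n)
{
        const unsigned char *pa = a, *pb = b;
        unsigned long neq = 0;

        while (n--)
                neq |= *pa++ ^ *pb++;
        return neq;
}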
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/skcipher.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> - -static int crypto_ofb_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - const unsigned int bsize = crypto_cipher_blocksize(cipher); - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes >= bsize) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - u8 * const iv = walk.iv; - unsigned int nbytes = walk.nbytes; - - do { - crypto_cipher_encrypt_one(cipher, iv, iv); - crypto_xor_cpy(dst, src, iv, bsize); - dst += bsize; - src += bsize; - } while ((nbytes -= bsize) >= bsize); - - err = skcipher_walk_done(&walk, nbytes); - } - - if (walk.nbytes) { - crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv); - crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - return err; -} - -static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct skcipher_instance *inst; - struct crypto_alg *alg; - int err; - - inst = skcipher_alloc_instance_simple(tmpl, tb); - if (IS_ERR(inst)) - return PTR_ERR(inst); - - alg = skcipher_ialg_simple(inst); - - /* OFB mode is a stream cipher. */ - inst->alg.base.cra_blocksize = 1; - - /* - * To simplify the implementation, configure the skcipher walk to only - * give a partial block at the very end, never earlier. - */ - inst->alg.chunksize = alg->cra_blocksize; - - inst->alg.encrypt = crypto_ofb_crypt; - inst->alg.decrypt = crypto_ofb_crypt; - - err = skcipher_register_instance(tmpl, inst); - if (err) - inst->free(inst); - - return err; -} - -static struct crypto_template crypto_ofb_tmpl = { - .name = "ofb", - .create = crypto_ofb_create, - .module = THIS_MODULE, -}; - -static int __init crypto_ofb_module_init(void) -{ - return crypto_register_template(&crypto_ofb_tmpl); -} - -static void __exit crypto_ofb_module_exit(void) -{ - crypto_unregister_template(&crypto_ofb_tmpl); -} - -subsys_initcall(crypto_ofb_module_init); -module_exit(crypto_ofb_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("OFB block cipher mode of operation"); -MODULE_ALIAS_CRYPTO("ofb"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 7030f59e46b6..d092717ea4fc 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, struct crypto_cipher *tfm) { int bsize = crypto_cipher_blocksize(tfm); + const u8 *src = walk->src.virt.addr; unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; @@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, { int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE]; do { - memcpy(tmpbuf, src, bsize); - crypto_xor(iv, src, bsize); - crypto_cipher_encrypt_one(tfm, src, iv); - crypto_xor_cpy(iv, tmpbuf, src, bsize); + memcpy(tmpbuf, dst, bsize); + crypto_xor(iv, dst, bsize); + crypto_cipher_encrypt_one(tfm, dst, iv); + crypto_xor_cpy(iv, tmpbuf, dst, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= 
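The OFB driver removed above reduces to a short loop: encrypt the IV to produce the next keystream block, XOR it into the data, repeat; encryption and decryption are the same operation, and the trailing partial block simply truncates the XOR. A self-contained sketch with a stand-in one-block encryptor:

#include <stddef.h>

typedef void (*block_enc_fn)(unsigned char *block);    /* stand-in cipher */

static void ofb_sketch(block_enc_fn enc, unsigned char *iv, size_t bsize,
                       const unsigned char *src, unsigned char *dst,
                       size_t len)
{
        size_t i, n;

        while (len) {
                enc(iv);                        /* next keystream block */
                n = len < bsize ? len : bsize;  /* truncated final block */
                for (i = 0; i < n; i++)
                        dst[i] = src[i] ^ iv[i];
                src += n;
                dst += n;
                len -= n;
        }
}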
bsize) >= bsize); return nbytes; @@ -71,7 +71,7 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { + while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_encrypt_inplace(req, &walk, cipher); @@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, struct crypto_cipher *tfm) { int bsize = crypto_cipher_blocksize(tfm); + const u8 *src = walk->src.virt.addr; unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; @@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, { int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32)); do { - memcpy(tmpbuf, src, bsize); - crypto_cipher_decrypt_one(tfm, src, src); - crypto_xor(src, iv, bsize); - crypto_xor_cpy(iv, src, tmpbuf, bsize); + memcpy(tmpbuf, dst, bsize); + crypto_cipher_decrypt_one(tfm, dst, dst); + crypto_xor(dst, iv, bsize); + crypto_xor_cpy(iv, dst, tmpbuf, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; @@ -138,7 +138,7 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { + while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_decrypt_inplace(req, &walk, cipher); @@ -186,10 +186,10 @@ static void __exit crypto_pcbc_module_exit(void) crypto_unregister_template(&crypto_pcbc_tmpl); } -subsys_initcall(crypto_pcbc_module_init); +module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("pcbc"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index d569c7ed6c80..c3a9d4f2995c 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -63,9 +63,9 @@ static void pcrypt_aead_serial(struct padata_priv *padata) aead_request_complete(req->base.data, padata->info); } -static void pcrypt_aead_done(struct crypto_async_request *areq, int err) +static void pcrypt_aead_done(void *data, int err) { - struct aead_request *req = areq->data; + struct aead_request *req = data; struct pcrypt_request *preq = aead_request_ctx(req); struct padata_priv *padata = pcrypt_request_padata(preq); @@ -78,12 +78,14 @@ static void pcrypt_aead_enc(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); + int ret; - padata->info = crypto_aead_encrypt(req); + ret = crypto_aead_encrypt(req); - if (padata->info == -EINPROGRESS) + if (ret == -EINPROGRESS) return; + padata->info = ret; padata_do_serial(padata); } @@ -115,6 +117,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req) err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_encrypt(creq); + } return err; } @@ -123,12 +129,14 @@ static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); + int ret; - padata->info = 
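The PCBC rework above only changes which pointer the in-place loops walk; the chaining rule itself is unchanged: C_i = E(IV xor P_i), then IV' = P_i xor C_i, so both plaintext and ciphertext feed the next block. An in-place sketch of one encryption step, with E as a stand-in one-block encryptor:

#include <stddef.h>

static void pcbc_step_sketch(void (*E)(unsigned char *), unsigned char *iv,
                             unsigned char *blk, size_t bsize)
{
        size_t i;

        for (i = 0; i < bsize; i++)     /* iv = IV ^ P */
                iv[i] ^= blk[i];
        E(iv);                          /* iv now holds C */
        for (i = 0; i < bsize; i++) {
                unsigned char c = iv[i];

                iv[i] = blk[i] ^ c;     /* next IV = P ^ C */
                blk[i] = c;             /* emit ciphertext in place */
        }
}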
crypto_aead_decrypt(req); + ret = crypto_aead_decrypt(req); - if (padata->info == -EINPROGRESS) + if (ret == -EINPROGRESS) return; + padata->info = ret; padata_do_serial(padata); } @@ -160,13 +168,17 @@ static int pcrypt_aead_decrypt(struct aead_request *req) err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_decrypt(creq); + } return err; } static int pcrypt_aead_init_tfm(struct crypto_aead *tfm) { - int cpu, cpu_index; + int cpu_index; struct aead_instance *inst = aead_alg_instance(tfm); struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -175,10 +187,7 @@ static int pcrypt_aead_init_tfm(struct crypto_aead *tfm) cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) % cpumask_weight(cpu_online_mask); - ctx->cb_cpu = cpumask_first(cpu_online_mask); - for (cpu = 0; cpu < cpu_index; cpu++) - ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); - + ctx->cb_cpu = cpumask_nth(cpu_index, cpu_online_mask); cipher = crypto_spawn_aead(&ictx->spawn); if (IS_ERR(cipher)) @@ -369,7 +378,7 @@ static void __exit pcrypt_exit(void) kset_unregister(pcrypt_kset); } -subsys_initcall(pcrypt_init); +module_init(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c deleted file mode 100644 index 94af47eb6fa6..000000000000 --- a/crypto/poly1305_generic.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Poly1305 authenticator algorithm, RFC7539 - * - * Copyright (C) 2015 Martin Willi - * - * Based on public domain code by Andrew Moon and Daniel J. Bernstein. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
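The pcrypt hunks above also stop writing padata->info before the -EINPROGRESS check: once a request is in flight, its completion callback owns that field, so the status is staged in a local and stored only on the synchronous path. The fixed ordering, sketched:

#include <crypto/aead.h>
#include <linux/padata.h>

static void pcrypt_work_sketch(struct padata_priv *padata,
                               struct aead_request *req)
{
        int ret = crypto_aead_decrypt(req);

        if (ret == -EINPROGRESS)
                return;         /* the completion callback reports status */

        padata->info = ret;     /* synchronous result, safe to store */
        padata_do_serial(padata);
}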
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <asm/unaligned.h> - -static int crypto_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - poly1305_core_init(&dctx->h); - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) -{ - if (!dctx->sset) { - if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { - poly1305_core_setkey(&dctx->core_r, src); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->rset = 2; - } - if (srclen >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return srclen; -} - -static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) -{ - unsigned int datalen; - - if (unlikely(!dctx->sset)) { - datalen = crypto_poly1305_setdesckey(dctx, src, srclen); - src += srclen - datalen; - srclen = datalen; - } - - poly1305_core_blocks(&dctx->h, &dctx->core_r, src, - srclen / POLY1305_BLOCK_SIZE, 1); -} - -static int crypto_poly1305_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - srclen -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks(dctx, dctx->buf, - POLY1305_BLOCK_SIZE); - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { - poly1305_blocks(dctx, src, srclen); - src += srclen - (srclen % POLY1305_BLOCK_SIZE); - srclen %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } - - return 0; -} - -static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_generic(dctx, dst); - return 0; -} - -static struct shash_alg poly1305_alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_init, - .update = crypto_poly1305_update, - .final = crypto_poly1305_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-generic", - .cra_priority = 100, - .cra_blocksize = POLY1305_BLOCK_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -static int __init poly1305_mod_init(void) -{ - return crypto_register_shash(&poly1305_alg); -} - -static void __exit poly1305_mod_exit(void) -{ - crypto_unregister_shash(&poly1305_alg); -} - -subsys_initcall(poly1305_mod_init); -module_exit(poly1305_mod_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("Poly1305 authenticator"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-generic"); diff --git a/crypto/proc.c b/crypto/proc.c index 12fccb9c5205..82f15b967e85 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -11,6 +11,7 @@ #include <linux/atomic.h> 
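The generic Poly1305 driver deleted above fed the one-time key in two 16-byte halves, r (clamped via poly1305_core_setkey) and s, mirroring the definition of the MAC: the message is split into 16-byte chunks c_i (each extended with a high bit), accumulated as a polynomial in r modulo 2^130 - 5, and s is added last:

        tag = ((c_1*r^n + c_2*r^(n-1) + ... + c_n*r) mod (2^130 - 5) + s) mod 2^128

The rset/sset bookkeeping in the removed code existed only to collect those two key halves across early update() calls.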
#include <linux/init.h> #include <linux/crypto.h> +#include <linux/fips.h> #include <linux/module.h> /* for module_name() */ #include <linux/rwsem.h> #include <linux/proc_fs.h> @@ -46,8 +47,10 @@ static int c_show(struct seq_file *m, void *p) (alg->cra_flags & CRYPTO_ALG_TESTED) ? "passed" : "unknown"); seq_printf(m, "internal : %s\n", - (alg->cra_flags & CRYPTO_ALG_INTERNAL) ? - "yes" : "no"); + str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL)); + if (fips_enabled) + seq_printf(m, "fips : %s\n", + str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)); if (alg->cra_flags & CRYPTO_ALG_LARVAL) { seq_printf(m, "type : larval\n"); @@ -69,9 +72,6 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "max keysize : %u\n", alg->cra_cipher.cia_max_keysize); break; - case CRYPTO_ALG_TYPE_COMPRESS: - seq_printf(m, "type : compression\n"); - break; default: seq_printf(m, "type : unknown\n"); break; diff --git a/crypto/rmd160.c b/crypto/rmd160.c index c5fe4034b153..9860b60c9be4 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -9,18 +9,14 @@ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> */ #include <crypto/internal/hash.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <asm/byteorder.h> - +#include <linux/string.h> #include "ripemd.h" struct rmd160_ctx { u64 byte_count; u32 state[5]; - __le32 buffer[16]; }; #define K1 RMD_K1 @@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc) rctx->state[3] = RMD_H3; rctx->state[4] = RMD_H4; - memset(rctx->buffer, 0, sizeof(rctx->buffer)); - return 0; } static int rmd160_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + int remain = len - round_down(len, RMD160_BLOCK_SIZE); struct rmd160_ctx *rctx = shash_desc_ctx(desc); - const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); - - rctx->byte_count += len; + __le32 buffer[RMD160_BLOCK_SIZE / 4]; - /* Enough space in buffer? If so copy and we're done */ - if (avail > len) { - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, len); - goto out; - } - - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, avail); + rctx->byte_count += len - remain; - rmd160_transform(rctx->state, rctx->buffer); - data += avail; - len -= avail; - - while (len >= sizeof(rctx->buffer)) { - memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd160_transform(rctx->state, rctx->buffer); - data += sizeof(rctx->buffer); - len -= sizeof(rctx->buffer); - } + do { + memcpy(buffer, data, sizeof(buffer)); + rmd160_transform(rctx->state, buffer); + data += sizeof(buffer); + len -= sizeof(buffer); + } while (len >= sizeof(buffer)); - memcpy(rctx->buffer, data, len); - -out: - return 0; + memzero_explicit(buffer, sizeof(buffer)); + return remain; } /* Add padding and return the message digest. */ -static int rmd160_final(struct shash_desc *desc, u8 *out) +static int rmd160_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { + unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1; struct rmd160_ctx *rctx = shash_desc_ctx(desc); - u32 i, index, padlen; - __le64 bits; + union { + __le64 l64[RMD160_BLOCK_SIZE / 4]; + __le32 l32[RMD160_BLOCK_SIZE / 2]; + u8 u8[RMD160_BLOCK_SIZE * 2]; + } block = {}; __le32 *dst = (__le32 *)out; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_le64(rctx->byte_count << 3); - - /* Pad out to 56 mod 64 */ - index = rctx->byte_count & 0x3f; - padlen = (index < 56) ? 
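The rmd160_update above no longer buffers partial input: with CRYPTO_AHASH_ALG_BLOCK_ONLY (set later in the hunk), the core consumes whole blocks and hands the leftover byte count back for the framework to carry. A sketch of that contract, with a 64-byte block assumed:

#include <stddef.h>

#define BLOCK_SIZE 64

/* Consume whole blocks only; the framework re-presents the remainder
 * together with the next chunk (or at finup time). */
static int update_sketch(const unsigned char *data, size_t len,
                         void (*transform)(const unsigned char *blk))
{
        size_t remain = len % BLOCK_SIZE;

        for (; len >= BLOCK_SIZE; len -= BLOCK_SIZE, data += BLOCK_SIZE)
                transform(data);
        return (int)remain;
}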
(56 - index) : ((64+56) - index); - rmd160_update(desc, padding, padlen); + u32 i; - /* Append length */ - rmd160_update(desc, (const u8 *)&bits, sizeof(bits)); + rctx->byte_count += len; + if (len >= bit_offset * 8) + bit_offset += RMD160_BLOCK_SIZE / 8; + memcpy(&block, src, len); + block.u8[len] = 0x80; + block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3); + + rmd160_transform(rctx->state, block.l32); + if (bit_offset > RMD160_BLOCK_SIZE / 8) + rmd160_transform(rctx->state, + block.l32 + RMD160_BLOCK_SIZE / 4); + memzero_explicit(&block, sizeof(block)); /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_le32p(&rctx->state[i]); - - /* Wipe context */ - memset(rctx, 0, sizeof(*rctx)); - return 0; } @@ -338,11 +321,12 @@ static struct shash_alg alg = { .digestsize = RMD160_DIGEST_SIZE, .init = rmd160_init, .update = rmd160_update, - .final = rmd160_final, + .finup = rmd160_finup, .descsize = sizeof(struct rmd160_ctx), .base = { .cra_name = "rmd160", .cra_driver_name = "rmd160-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = RMD160_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(rmd160_mod_init); +module_init(rmd160_mod_init); module_exit(rmd160_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/rng.c b/crypto/rng.c index fea082b25fe4..ee1768c5a400 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -8,17 +8,17 @@ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> */ -#include <linux/atomic.h> #include <crypto/internal/rng.h> +#include <linux/atomic.h> +#include <linux/cryptouser.h> #include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/cryptouser.h> -#include <linux/compiler.h> #include <net/netlink.h> #include "internal.h" @@ -30,7 +30,6 @@ static int crypto_default_rng_refcnt; int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { - struct crypto_alg *alg = tfm->base.__crt_alg; u8 *buf = NULL; int err; @@ -45,9 +44,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) seed = buf; } - crypto_stats_get(alg); err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); - crypto_stats_rng_seed(alg, err); out: kfree_sensitive(buf); return err; @@ -66,8 +63,8 @@ static unsigned int seedsize(struct crypto_alg *alg) return ralg->seedsize; } -#ifdef CONFIG_NET -static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) +static int __maybe_unused crypto_rng_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_rng rrng; @@ -79,12 +76,6 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng); } -#else -static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -100,11 +91,14 @@ static const struct crypto_type crypto_rng_type = { #ifdef CONFIG_PROC_FS .show = crypto_rng_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_rng_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_RNG, .tfmsize = offsetof(struct crypto_rng, base), + .algsize = offsetof(struct rng_alg, base), }; struct crypto_rng 
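The new rmd160_finup packs the classic Merkle-Damgård padding into at most two blocks of one local buffer: append 0x80, zero-fill, and place the 64-bit bit count at the end of the first block when it still fits, otherwise at the end of a second. The boundary, worked for a 64-byte block:

        len = 55:  55 + 1 (0x80) + 8 (length) = 64   -> one block
        len = 56:  56 + 1 + 8 = 65 > 64              -> spills into a second block

which is exactly the bit_offset adjustment guarded by len >= bit_offset * 8 above.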
*crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) @@ -174,6 +168,11 @@ out: EXPORT_SYMBOL_GPL(crypto_del_default_rng); #endif +static void rng_default_set_ent(struct crypto_rng *tfm, const u8 *data, + unsigned int len) +{ +} + int crypto_register_rng(struct rng_alg *alg) { struct crypto_alg *base = &alg->base; @@ -185,6 +184,9 @@ int crypto_register_rng(struct rng_alg *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_RNG; + if (!alg->set_ent) + alg->set_ent = rng_default_set_ent; + return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_rng); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 8ac3e73e8ea6..50bdb18e7b48 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -16,78 +16,6 @@ #include <linux/random.h> #include <linux/scatterlist.h> -/* - * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2]. - */ -static const u8 rsa_digest_info_md5[] = { - 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, - 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */ - 0x05, 0x00, 0x04, 0x10 -}; - -static const u8 rsa_digest_info_sha1[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x0e, 0x03, 0x02, 0x1a, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 rsa_digest_info_rmd160[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x24, 0x03, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 rsa_digest_info_sha224[] = { - 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, - 0x05, 0x00, 0x04, 0x1c -}; - -static const u8 rsa_digest_info_sha256[] = { - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x20 -}; - -static const u8 rsa_digest_info_sha384[] = { - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, - 0x05, 0x00, 0x04, 0x30 -}; - -static const u8 rsa_digest_info_sha512[] = { - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, - 0x05, 0x00, 0x04, 0x40 -}; - -static const struct rsa_asn1_template { - const char *name; - const u8 *data; - size_t size; -} rsa_asn1_templates[] = { -#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) } - _(md5), - _(sha1), - _(rmd160), - _(sha256), - _(sha384), - _(sha512), - _(sha224), - { NULL } -#undef _ -}; - -static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name) -{ - const struct rsa_asn1_template *p; - - for (p = rsa_asn1_templates; p->name; p++) - if (strcmp(name, p->name) == 0) - return p; - return NULL; -} - struct pkcs1pad_ctx { struct crypto_akcipher *child; unsigned int key_size; @@ -95,7 +23,6 @@ struct pkcs1pad_ctx { struct pkcs1pad_inst_ctx { struct crypto_akcipher_spawn spawn; - const struct rsa_asn1_template *digest_info; }; struct pkcs1pad_request { @@ -108,42 +35,16 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - int err; - ctx->key_size = 0; - - err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); - if (err) - return err; - - /* Find out new modulus size from rsa implementation */ - err = crypto_akcipher_maxsize(ctx->child); - if (err > PAGE_SIZE) - return -ENOTSUPP; - - ctx->key_size = err; - return 0; + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen); } static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct pkcs1pad_ctx *ctx = 
akcipher_tfm_ctx(tfm); - int err; - - ctx->key_size = 0; - - err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); - if (err) - return err; - - /* Find out new modulus size from rsa implementation */ - err = crypto_akcipher_maxsize(ctx->child); - if (err > PAGE_SIZE) - return -ENOTSUPP; - ctx->key_size = err; - return 0; + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen); } static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) @@ -151,9 +52,9 @@ static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); /* - * The maximum destination buffer size for the encrypt/sign operations + * The maximum destination buffer size for the encrypt operation * will be the same as for RSA, even though it's smaller for - * decrypt/verify. + * decrypt. */ return ctx->key_size; @@ -171,7 +72,7 @@ static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, sg_chain(sg, nsegs, next); } -static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) +static int pkcs1pad_encrypt_complete(struct akcipher_request *req, int err) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); @@ -190,7 +91,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) if (likely(!pad_len)) goto out; - out_buf = kzalloc(ctx->key_size, GFP_KERNEL); + out_buf = kzalloc(ctx->key_size, GFP_ATOMIC); err = -ENOMEM; if (!out_buf) goto out; @@ -210,20 +111,17 @@ out: return err; } -static void pkcs1pad_encrypt_sign_complete_cb( - struct crypto_async_request *child_async_req, int err) +static void pkcs1pad_encrypt_complete_cb(void *data, int err) { - struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; + struct akcipher_request *req = data; if (err == -EINPROGRESS) - return; + goto out; + + err = pkcs1pad_encrypt_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, - pkcs1pad_encrypt_sign_complete(req, err)); +out: + akcipher_request_complete(req, err); } static int pkcs1pad_encrypt(struct akcipher_request *req) @@ -253,7 +151,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) ps_end = ctx->key_size - req->src_len - 2; req_ctx->in_buf[0] = 0x02; for (i = 1; i < ps_end; i++) - req_ctx->in_buf[i] = 1 + prandom_u32_max(255); + req_ctx->in_buf[i] = get_random_u32_inclusive(1, 255); req_ctx->in_buf[ps_end] = 0x00; pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, @@ -261,7 +159,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, - pkcs1pad_encrypt_sign_complete_cb, req); + pkcs1pad_encrypt_complete_cb, req); /* Reuse output buffer */ akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, @@ -269,7 +167,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) err = crypto_akcipher_encrypt(&req_ctx->child_req); if (err != -EINPROGRESS && err != -EBUSY) - return pkcs1pad_encrypt_sign_complete(req, err); + return pkcs1pad_encrypt_complete(req, err); return err; } @@ -328,19 +226,17 @@ done: return err; } -static void pkcs1pad_decrypt_complete_cb( - struct crypto_async_request *child_async_req, int err) +static void pkcs1pad_decrypt_complete_cb(void *data, int err) { - struct 
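pkcs1pad_encrypt above builds the EME-PKCS1-v1_5 block EM = 0x00 || 0x02 || PS || 0x00 || M, where PS is random nonzero filler (hence the draw from [1, 255]) at least 8 bytes long — the source of the key_size - 11 input limit; the kernel buffer omits the leading zero byte, which the RSA primitive implies. A layout sketch with a stand-in filler source:

#include <stddef.h>
#include <stdlib.h>

/* Build the full EM into buf[key_size]; requires msg_len <= key_size - 11. */
static void eme_pkcs1_v15_sketch(unsigned char *buf, size_t key_size,
                                 const unsigned char *msg, size_t msg_len)
{
        size_t ps_end = key_size - msg_len - 1, i;

        buf[0] = 0x00;
        buf[1] = 0x02;
        for (i = 2; i < ps_end; i++)
                buf[i] = 1 + rand() % 255;      /* stand-in for a real RNG */
        buf[ps_end] = 0x00;
        for (i = 0; i < msg_len; i++)
                buf[ps_end + 1 + i] = msg[i];
}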
akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; + struct akcipher_request *req = data; if (err == -EINPROGRESS) - return; + goto out; + + err = pkcs1pad_decrypt_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); +out: + akcipher_request_complete(req, err); } static int pkcs1pad_decrypt(struct akcipher_request *req) @@ -376,194 +272,6 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) return err; } -static int pkcs1pad_sign(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - struct akcipher_instance *inst = akcipher_alg_instance(tfm); - struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); - const struct rsa_asn1_template *digest_info = ictx->digest_info; - int err; - unsigned int ps_end, digest_size = 0; - - if (!ctx->key_size) - return -EINVAL; - - if (digest_info) - digest_size = digest_info->size; - - if (req->src_len + digest_size > ctx->key_size - 11) - return -EOVERFLOW; - - if (req->dst_len < ctx->key_size) { - req->dst_len = ctx->key_size; - return -EOVERFLOW; - } - - req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, - GFP_KERNEL); - if (!req_ctx->in_buf) - return -ENOMEM; - - ps_end = ctx->key_size - digest_size - req->src_len - 2; - req_ctx->in_buf[0] = 0x01; - memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); - req_ctx->in_buf[ps_end] = 0x00; - - if (digest_info) - memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, - digest_info->size); - - pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, - ctx->key_size - 1 - req->src_len, req->src); - - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); - akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, - pkcs1pad_encrypt_sign_complete_cb, req); - - /* Reuse output buffer */ - akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, - req->dst, ctx->key_size - 1, req->dst_len); - - err = crypto_akcipher_decrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && err != -EBUSY) - return pkcs1pad_encrypt_sign_complete(req, err); - - return err; -} - -static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - struct akcipher_instance *inst = akcipher_alg_instance(tfm); - struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); - const struct rsa_asn1_template *digest_info = ictx->digest_info; - unsigned int dst_len; - unsigned int pos; - u8 *out_buf; - - if (err) - goto done; - - err = -EINVAL; - dst_len = req_ctx->child_req.dst_len; - if (dst_len < ctx->key_size - 1) - goto done; - - out_buf = req_ctx->out_buf; - if (dst_len == ctx->key_size) { - if (out_buf[0] != 0x00) - /* Decrypted value had no leading 0 byte */ - goto done; - - dst_len--; - out_buf++; - } - - err = -EBADMSG; - if (out_buf[0] != 0x01) - goto done; - - for (pos = 1; pos < dst_len; pos++) - if (out_buf[pos] != 0xff) - break; - - if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) - goto done; - pos++; - - if (digest_info) { - if (crypto_memneq(out_buf + pos, digest_info->data, - digest_info->size)) - goto done; - - pos += 
digest_info->size; - } - - err = 0; - - if (req->dst_len != dst_len - pos) { - err = -EKEYREJECTED; - req->dst_len = dst_len - pos; - goto done; - } - /* Extract appended digest. */ - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, - req->src_len + req->dst_len), - req_ctx->out_buf + ctx->key_size, - req->dst_len, ctx->key_size); - /* Do the actual verification step. */ - if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos, - req->dst_len) != 0) - err = -EKEYREJECTED; -done: - kfree_sensitive(req_ctx->out_buf); - - return err; -} - -static void pkcs1pad_verify_complete_cb( - struct crypto_async_request *child_async_req, int err) -{ - struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; - - if (err == -EINPROGRESS) - return; - - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); -} - -/* - * The verify operation is here for completeness similar to the verification - * defined in RFC2313 section 10.2 except that block type 0 is not accepted, - * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to - * retrieve the DigestInfo from a signature, instead the user is expected - * to call the sign operation to generate the expected signature and compare - * signatures instead of the message-digests. - */ -static int pkcs1pad_verify(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - int err; - - if (WARN_ON(req->dst) || - WARN_ON(!req->dst_len) || - !ctx->key_size || req->src_len < ctx->key_size) - return -EINVAL; - - req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL); - if (!req_ctx->out_buf) - return -ENOMEM; - - pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, - ctx->key_size, NULL); - - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); - akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, - pkcs1pad_verify_complete_cb, req); - - /* Reuse input buffer, output to a new buffer */ - akcipher_request_set_crypt(&req_ctx->child_req, req->src, - req_ctx->out_sg, req->src_len, - ctx->key_size); - - err = crypto_akcipher_encrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && err != -EBUSY) - return pkcs1pad_verify_complete(req, err); - - return err; -} - static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) { struct akcipher_instance *inst = akcipher_alg_instance(tfm); @@ -576,6 +284,10 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(child_tfm); ctx->child = child_tfm; + + akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) + + crypto_akcipher_reqsize(child_tfm)); + return 0; } @@ -601,7 +313,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; struct akcipher_alg *rsa_alg; - const char *hash_name; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); @@ -621,38 +332,22 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); - err = -ENAMETOOLONG; - hash_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(hash_name)) { - if (snprintf(inst->alg.base.cra_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", - rsa_alg->base.cra_name) >= 
CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - if (snprintf(inst->alg.base.cra_driver_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", - rsa_alg->base.cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - } else { - ctx->digest_info = rsa_lookup_asn1(hash_name); - if (!ctx->digest_info) { - err = -EINVAL; - goto err_free_inst; - } - - if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, - hash_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - if (snprintf(inst->alg.base.cra_driver_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", - rsa_alg->base.cra_driver_name, - hash_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; + if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { + err = -EINVAL; + goto err_free_inst; } + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); @@ -661,12 +356,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = pkcs1pad_encrypt; inst->alg.decrypt = pkcs1pad_decrypt; - inst->alg.sign = pkcs1pad_sign; - inst->alg.verify = pkcs1pad_verify; inst->alg.set_pub_key = pkcs1pad_set_pub_key; inst->alg.set_priv_key = pkcs1pad_set_priv_key; inst->alg.max_size = pkcs1pad_get_max_size; - inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize; inst->free = pkcs1pad_free; @@ -683,3 +375,5 @@ struct crypto_template rsa_pkcs1pad_tmpl = { .create = pkcs1pad_create, .module = THIS_MODULE, }; + +MODULE_ALIAS_CRYPTO("pkcs1pad"); diff --git a/crypto/rsa.c b/crypto/rsa.c index 4cdbec95d077..6c7734083c98 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -5,6 +5,7 @@ * Authors: Tadeusz Struk <tadeusz.struk@intel.com> */ +#include <linux/fips.h> #include <linux/module.h> #include <linux/mpi.h> #include <crypto/internal/rsa.h> @@ -16,16 +17,45 @@ struct rsa_mpi_key { MPI n; MPI e; MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; }; +static int rsa_check_payload(MPI x, MPI n) +{ + MPI n1; + + if (mpi_cmp_ui(x, 1) <= 0) + return -EINVAL; + + n1 = mpi_alloc(0); + if (!n1) + return -ENOMEM; + + if (mpi_sub_ui(n1, n, 1) || mpi_cmp(x, n1) >= 0) { + mpi_free(n1); + return -EINVAL; + } + + mpi_free(n1); + return 0; +} + /* * RSAEP function [RFC3447 sec 5.1.1] * c = m^e mod n; */ static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m) { - /* (1) Validate 0 <= m < n */ - if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) + /* + * Even though (1) in RFC3447 only requires 0 <= m <= n - 1, we are + * slightly more conservative and require 1 < m < n - 1. This is in line + * with SP 800-56Br2, Section 7.1.1. 
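+ *
+ * Concretely, rsa_check_payload() above rejects m <= 1 and
+ * m >= n - 1: for a toy modulus n = 55 only 2 <= m <= 53 is
+ * accepted, which excludes the fixed points 0, 1 and n - 1 of
+ * modular exponentiation with an odd exponent.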
+ */ + if (rsa_check_payload(m, key->n)) return -EINVAL; /* (2) c = m^e mod n */ @@ -34,16 +64,52 @@ static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m) /* * RSADP function [RFC3447 sec 5.1.2] - * m = c^d mod n; + * m_1 = c^dP mod p; + * m_2 = c^dQ mod q; + * h = (m_1 - m_2) * qInv mod p; + * m = m_2 + q * h; */ -static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c) +static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c) { - /* (1) Validate 0 <= c < n */ - if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) + MPI m2, m12_or_qh; + int ret = -ENOMEM; + + /* + * Even though (1) in RFC3447 only requires 0 <= c <= n - 1, we are + * slightly more conservative and require 1 < c < n - 1. This is in line + * with SP 800-56Br2, Section 7.1.2. + */ + if (rsa_check_payload(c, key->n)) return -EINVAL; - /* (2) m = c^d mod n */ - return mpi_powm(m, c, key->d, key->n); + m2 = mpi_alloc(0); + m12_or_qh = mpi_alloc(0); + if (!m2 || !m12_or_qh) + goto err_free_mpi; + + /* (2i) m_1 = c^dP mod p */ + ret = mpi_powm(m_or_m1_or_h, c, key->dp, key->p); + if (ret) + goto err_free_mpi; + + /* (2i) m_2 = c^dQ mod q */ + ret = mpi_powm(m2, c, key->dq, key->q); + if (ret) + goto err_free_mpi; + + /* (2iii) h = (m_1 - m_2) * qInv mod p */ + ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?: + mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); + + /* (2iv) m = m_2 + q * h */ + ret = ret ?: + mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?: + mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); + +err_free_mpi: + mpi_free(m12_or_qh); + mpi_free(m2); + return ret; } static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm) @@ -111,7 +177,7 @@ static int rsa_dec(struct akcipher_request *req) if (!c) goto err_free_m; - ret = _rsa_dec(pkey, m, c); + ret = _rsa_dec_crt(pkey, m, c); if (ret) goto err_free_c; @@ -133,9 +199,19 @@ static void rsa_free_mpi_key(struct rsa_mpi_key *key) mpi_free(key->d); mpi_free(key->e); mpi_free(key->n); + mpi_free(key->p); + mpi_free(key->q); + mpi_free(key->dp); + mpi_free(key->dq); + mpi_free(key->qinv); key->d = NULL; key->e = NULL; key->n = NULL; + key->p = NULL; + key->q = NULL; + key->dp = NULL; + key->dq = NULL; + key->qinv = NULL; } static int rsa_check_key_length(unsigned int len) @@ -144,6 +220,9 @@ static int rsa_check_key_length(unsigned int len) case 512: case 1024: case 1536: + if (fips_enabled) + return -EINVAL; + fallthrough; case 2048: case 3072: case 4096: @@ -153,6 +232,40 @@ static int rsa_check_key_length(unsigned int len) return -EINVAL; } +static int rsa_check_exponent_fips(MPI e) +{ + MPI e_max = NULL; + int err; + + /* check if odd */ + if (!mpi_test_bit(e, 0)) { + return -EINVAL; + } + + /* check if 2^16 < e < 2^256. 
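The smallest accepted value is thus the common e = 65537 (F4).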
*/ + if (mpi_cmp_ui(e, 65536) <= 0) { + return -EINVAL; + } + + e_max = mpi_alloc(0); + if (!e_max) + return -ENOMEM; + + err = mpi_set_bit(e_max, 256); + if (err) { + mpi_free(e_max); + return err; + } + + if (mpi_cmp(e, e_max) >= 0) { + mpi_free(e_max); + return -EINVAL; + } + + mpi_free(e_max); + return 0; +} + static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { @@ -180,6 +293,11 @@ static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, return -EINVAL; } + if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) { + rsa_free_mpi_key(mpi_key); + return -EINVAL; + } + return 0; err: @@ -213,11 +331,36 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, if (!mpi_key->n) goto err; + mpi_key->p = mpi_read_raw_data(raw_key.p, raw_key.p_sz); + if (!mpi_key->p) + goto err; + + mpi_key->q = mpi_read_raw_data(raw_key.q, raw_key.q_sz); + if (!mpi_key->q) + goto err; + + mpi_key->dp = mpi_read_raw_data(raw_key.dp, raw_key.dp_sz); + if (!mpi_key->dp) + goto err; + + mpi_key->dq = mpi_read_raw_data(raw_key.dq, raw_key.dq_sz); + if (!mpi_key->dq) + goto err; + + mpi_key->qinv = mpi_read_raw_data(raw_key.qinv, raw_key.qinv_sz); + if (!mpi_key->qinv) + goto err; + if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { rsa_free_mpi_key(mpi_key); return -EINVAL; } + if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) { + rsa_free_mpi_key(mpi_key); + return -EINVAL; + } + return 0; err: @@ -255,7 +398,7 @@ static struct akcipher_alg rsa = { }, }; -static int rsa_init(void) +static int __init rsa_init(void) { int err; @@ -264,21 +407,30 @@ static int rsa_init(void) return err; err = crypto_register_template(&rsa_pkcs1pad_tmpl); - if (err) { - crypto_unregister_akcipher(&rsa); - return err; - } + if (err) + goto err_unregister_rsa; + + err = crypto_register_template(&rsassa_pkcs1_tmpl); + if (err) + goto err_unregister_rsa_pkcs1pad; return 0; + +err_unregister_rsa_pkcs1pad: + crypto_unregister_template(&rsa_pkcs1pad_tmpl); +err_unregister_rsa: + crypto_unregister_akcipher(&rsa); + return err; } -static void rsa_exit(void) +static void __exit rsa_exit(void) { + crypto_unregister_template(&rsassa_pkcs1_tmpl); crypto_unregister_template(&rsa_pkcs1pad_tmpl); crypto_unregister_akcipher(&rsa); } -subsys_initcall(rsa_init); +module_init(rsa_init); module_exit(rsa_exit); MODULE_ALIAS_CRYPTO("rsa"); MODULE_LICENSE("GPL"); diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1 index 4ce06758e8af..76865124a9c7 100644 --- a/crypto/rsaprivkey.asn1 +++ b/crypto/rsaprivkey.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2016 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.2 + RsaPrivKey ::= SEQUENCE { version INTEGER, n INTEGER ({ rsa_get_n }), diff --git a/crypto/rsapubkey.asn1 b/crypto/rsapubkey.asn1 index 725498e461d2..0d32b1ca6270 100644 --- a/crypto/rsapubkey.asn1 +++ b/crypto/rsapubkey.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2016 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.1 + RsaPubKey ::= SEQUENCE { n INTEGER ({ rsa_get_n }), e INTEGER ({ rsa_get_e }) diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c new file mode 100644 index 000000000000..94fa5e9600e7 --- /dev/null +++ b/crypto/rsassa-pkcs1.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RSA 
Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2) + * + * https://www.rfc-editor.org/rfc/rfc8017#section-8.2 + * + * Copyright (c) 2015 - 2024 Intel Corporation + */ + +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <crypto/akcipher.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/sig.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/rsa.h> +#include <crypto/internal/sig.h> + +/* + * Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24) + * + * RSA keys are usually much larger than the hash of the message to be signed. + * The hash is therefore prepended by the Full Hash Prefix and a 0xff padding. + * The Full Hash Prefix is an ASN.1 SEQUENCE containing the hash algorithm OID. + * + * https://www.rfc-editor.org/rfc/rfc9580#table-24 + */ + +static const u8 hash_prefix_none[] = { }; + +static const u8 hash_prefix_md5[] = { + 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, /* SEQUENCE (SEQUENCE (OID */ + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* <algorithm>, */ + 0x05, 0x00, 0x04, 0x10 /* NULL), OCTET STRING <hash>) */ +}; + +static const u8 hash_prefix_sha1[] = { + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, + 0x2b, 0x0e, 0x03, 0x02, 0x1a, + 0x05, 0x00, 0x04, 0x14 +}; + +static const u8 hash_prefix_rmd160[] = { + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, + 0x2b, 0x24, 0x03, 0x02, 0x01, + 0x05, 0x00, 0x04, 0x14 +}; + +static const u8 hash_prefix_sha224[] = { + 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, + 0x05, 0x00, 0x04, 0x1c +}; + +static const u8 hash_prefix_sha256[] = { + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, + 0x05, 0x00, 0x04, 0x20 +}; + +static const u8 hash_prefix_sha384[] = { + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, + 0x05, 0x00, 0x04, 0x30 +}; + +static const u8 hash_prefix_sha512[] = { + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, + 0x05, 0x00, 0x04, 0x40 +}; + +static const u8 hash_prefix_sha3_256[] = { + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08, + 0x05, 0x00, 0x04, 0x20 +}; + +static const u8 hash_prefix_sha3_384[] = { + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09, + 0x05, 0x00, 0x04, 0x30 +}; + +static const u8 hash_prefix_sha3_512[] = { + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0a, + 0x05, 0x00, 0x04, 0x40 +}; + +static const struct hash_prefix { + const char *name; + const u8 *data; + size_t size; +} hash_prefixes[] = { +#define _(X) { #X, hash_prefix_##X, sizeof(hash_prefix_##X) } + _(none), + _(md5), + _(sha1), + _(rmd160), + _(sha256), + _(sha384), + _(sha512), + _(sha224), +#undef _ +#define _(X) { "sha3-" #X, hash_prefix_sha3_##X, sizeof(hash_prefix_sha3_##X) } + _(256), + _(384), + _(512), +#undef _ + { NULL } +}; + +static const struct hash_prefix *rsassa_pkcs1_find_hash_prefix(const char *name) +{ + const struct hash_prefix *p; + + for (p = hash_prefixes; p->name; p++) + if (strcmp(name, p->name) == 0) + return p; + return NULL; +} + +static bool rsassa_pkcs1_invalid_hash_len(unsigned int len, + const struct hash_prefix *p) +{ + /* + * Legacy protocols such as TLS 1.1 or earlier and IKE version 1 + * do not prepend a Full Hash Prefix to the hash. In that case, + * the size of the Full Hash Prefix is zero. 
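+ *
+ * This is what the "none" entry in hash_prefixes[] above encodes:
+ * with no prefix there is also no trailing length byte, so any
+ * hash length is accepted here.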
+ */ + if (p->data == hash_prefix_none) + return false; + + /* + * The final byte of the Full Hash Prefix encodes the hash length. + * + * This needs to be revisited should hash algorithms with more than + * 1016 bits (127 bytes * 8) ever be added. The length would then + * be encoded into more than one byte by ASN.1. + */ + static_assert(HASH_MAX_DIGESTSIZE <= 127); + + return len != p->data[p->size - 1]; +} + +struct rsassa_pkcs1_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct rsassa_pkcs1_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct hash_prefix *hash_prefix; +}; + +static int rsassa_pkcs1_sign(struct crypto_sig *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); + const struct hash_prefix *hash_prefix = ictx->hash_prefix; + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int pad_len; + unsigned int ps_end; + unsigned int len; + u8 *in_buf; + int err; + + if (!ctx->key_size) + return -EINVAL; + + if (dlen < ctx->key_size) + return -EOVERFLOW; + + if (rsassa_pkcs1_invalid_hash_len(slen, hash_prefix)) + return -EINVAL; + + if (slen + hash_prefix->size > ctx->key_size - 11) + return -EOVERFLOW; + + pad_len = ctx->key_size - slen - hash_prefix->size - 1; + + /* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */ + in_buf = dst; + memmove(in_buf + pad_len + hash_prefix->size, src, slen); + memcpy(in_buf + pad_len, hash_prefix->data, hash_prefix->size); + + ps_end = pad_len - 1; + in_buf[0] = 0x01; + memset(in_buf + 1, 0xff, ps_end - 1); + in_buf[ps_end] = 0x00; + + + /* RFC 8017 sec 8.2.1 step 2 - RSA signature */ + err = crypto_akcipher_sync_decrypt(ctx->child, in_buf, + ctx->key_size - 1, in_buf, + ctx->key_size); + if (err < 0) + return err; + + len = err; + pad_len = ctx->key_size - len; + + /* Four billion to one */ + if (unlikely(pad_len)) { + memmove(dst + pad_len, dst, len); + memset(dst, 0, pad_len); + } + + return ctx->key_size; +} + +static int rsassa_pkcs1_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); + const struct hash_prefix *hash_prefix = ictx->hash_prefix; + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child); + struct akcipher_request *child_req __free(kfree_sensitive) = NULL; + struct crypto_wait cwait; + struct scatterlist sg; + unsigned int dst_len; + unsigned int pos; + u8 *out_buf; + int err; + + /* RFC 8017 sec 8.2.2 step 1 - length checking */ + if (!ctx->key_size || + slen != ctx->key_size || + rsassa_pkcs1_invalid_hash_len(dlen, hash_prefix)) + return -EINVAL; + + /* RFC 8017 sec 8.2.2 step 2 - RSA verification */ + child_req = kmalloc(sizeof(*child_req) + child_reqsize + ctx->key_size, + GFP_KERNEL); + if (!child_req) + return -ENOMEM; + + out_buf = (u8 *)(child_req + 1) + child_reqsize; + memcpy(out_buf, src, slen); + + crypto_init_wait(&cwait); + sg_init_one(&sg, out_buf, slen); + akcipher_request_set_tfm(child_req, ctx->child); + akcipher_request_set_crypt(child_req, &sg, &sg, slen, slen); + akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &cwait); + + err = crypto_akcipher_encrypt(child_req); + err = crypto_wait_req(err, &cwait); + if (err) + return err; + + /* 
RFC 8017 sec 8.2.2 step 3 - EMSA-PKCS1-v1_5 encoding verification */ + dst_len = child_req->dst_len; + if (dst_len < ctx->key_size - 1) + return -EINVAL; + + if (dst_len == ctx->key_size) { + if (out_buf[0] != 0x00) + /* Encrypted value had no leading 0 byte */ + return -EINVAL; + + dst_len--; + out_buf++; + } + + if (out_buf[0] != 0x01) + return -EBADMSG; + + for (pos = 1; pos < dst_len; pos++) + if (out_buf[pos] != 0xff) + break; + + if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) + return -EBADMSG; + pos++; + + if (hash_prefix->size > dst_len - pos) + return -EBADMSG; + if (crypto_memneq(out_buf + pos, hash_prefix->data, hash_prefix->size)) + return -EBADMSG; + pos += hash_prefix->size; + + /* RFC 8017 sec 8.2.2 step 4 - comparison of digest with out_buf */ + if (dlen != dst_len - pos) + return -EKEYREJECTED; + if (memcmp(digest, out_buf + pos, dlen) != 0) + return -EKEYREJECTED; + + return 0; +} + +static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + return ctx->key_size * BITS_PER_BYTE; +} + +static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen); +} + +static int rsassa_pkcs1_set_priv_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen); +} + +static int rsassa_pkcs1_init_tfm(struct crypto_sig *tfm) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + struct crypto_akcipher *child_tfm; + + child_tfm = crypto_spawn_akcipher(&ictx->spawn); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void rsassa_pkcs1_exit_tfm(struct crypto_sig *tfm) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + crypto_free_akcipher(ctx->child); +} + +static void rsassa_pkcs1_free(struct sig_instance *inst) +{ + struct rsassa_pkcs1_inst_ctx *ctx = sig_instance_ctx(inst); + struct crypto_akcipher_spawn *spawn = &ctx->spawn; + + crypto_drop_akcipher(spawn); + kfree(inst); +} + +static int rsassa_pkcs1_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct rsassa_pkcs1_inst_ctx *ctx; + struct akcipher_alg *rsa_alg; + struct sig_instance *inst; + const char *hash_name; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = sig_instance_ctx(inst); + + err = crypto_grab_akcipher(&ctx->spawn, sig_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + + rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); + + if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { + err = -EINVAL; + goto err_free_inst; + } + + hash_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(hash_name)) { + err = PTR_ERR(hash_name); + goto err_free_inst; + } + + ctx->hash_prefix = rsassa_pkcs1_find_hash_prefix(hash_name); + if (!ctx->hash_prefix) { + err = -EINVAL; + goto err_free_inst; + } + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "pkcs1(%s,%s)", rsa_alg->base.cra_name, + hash_name) >= CRYPTO_MAX_ALG_NAME) + 
goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "pkcs1(%s,%s)", rsa_alg->base.cra_driver_name, + hash_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct rsassa_pkcs1_ctx); + + inst->alg.init = rsassa_pkcs1_init_tfm; + inst->alg.exit = rsassa_pkcs1_exit_tfm; + + inst->alg.sign = rsassa_pkcs1_sign; + inst->alg.verify = rsassa_pkcs1_verify; + inst->alg.key_size = rsassa_pkcs1_key_size; + inst->alg.set_pub_key = rsassa_pkcs1_set_pub_key; + inst->alg.set_priv_key = rsassa_pkcs1_set_priv_key; + + inst->free = rsassa_pkcs1_free; + + err = sig_register_instance(tmpl, inst); + if (err) { +err_free_inst: + rsassa_pkcs1_free(inst); + } + return err; +} + +struct crypto_template rsassa_pkcs1_tmpl = { + .name = "pkcs1", + .create = rsassa_pkcs1_create, + .module = THIS_MODULE, +}; + +MODULE_ALIAS_CRYPTO("pkcs1"); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 16f6ba896fb6..be0e24843806 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -15,59 +15,170 @@ #include <linux/module.h> #include <linux/scatterlist.h> -static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) +void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes) { - void *src = out ? buf : sgdata; - void *dst = out ? sgdata : buf; + struct scatterlist *sg = walk->sg; - memcpy(dst, src, nbytes); + nbytes += walk->offset - sg->offset; + + while (nbytes > sg->length) { + nbytes -= sg->length; + sg = sg_next(sg); + } + walk->sg = sg; + walk->offset = sg->offset + nbytes; } +EXPORT_SYMBOL_GPL(scatterwalk_skip); -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out) +inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, + unsigned int nbytes) { - for (;;) { - unsigned int len_this_page = scatterwalk_pagelen(walk); - u8 *vaddr; + do { + unsigned int to_copy; + + to_copy = scatterwalk_next(walk, nbytes); + memcpy(buf, walk->addr, to_copy); + scatterwalk_done_src(walk, to_copy); + buf += to_copy; + nbytes -= to_copy; + } while (nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk); - if (len_this_page > nbytes) - len_this_page = nbytes; +inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, + unsigned int nbytes) +{ + do { + unsigned int to_copy; + + to_copy = scatterwalk_next(walk, nbytes); + memcpy(walk->addr, buf, to_copy); + scatterwalk_done_dst(walk, to_copy); + buf += to_copy; + nbytes -= to_copy; + } while (nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk); - if (out != 2) { - vaddr = scatterwalk_map(walk); - memcpy_dir(buf, vaddr, len_this_page, out); - scatterwalk_unmap(vaddr); - } +void memcpy_from_sglist(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes) +{ + struct scatter_walk walk; - scatterwalk_advance(walk, len_this_page); + if (unlikely(nbytes == 0)) /* in case sg == NULL */ + return; - if (nbytes == len_this_page) - break; + scatterwalk_start_at_pos(&walk, sg, start); + memcpy_from_scatterwalk(buf, &walk, nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_from_sglist); - buf += len_this_page; - nbytes -= len_this_page; +void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, + const void *buf, unsigned int nbytes) +{ + struct scatter_walk walk; - scatterwalk_pagedone(walk, out & 1, 1); - } + if (unlikely(nbytes == 0)) /* in case sg == NULL */ + return; + + scatterwalk_start_at_pos(&walk, sg, start); + 
memcpy_to_scatterwalk(&walk, buf, nbytes); } -EXPORT_SYMBOL_GPL(scatterwalk_copychunks); +EXPORT_SYMBOL_GPL(memcpy_to_sglist); -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out) +/** + * memcpy_sglist() - Copy data from one scatterlist to another + * @dst: The destination scatterlist. Can be NULL if @nbytes == 0. + * @src: The source scatterlist. Can be NULL if @nbytes == 0. + * @nbytes: Number of bytes to copy + * + * The scatterlists can describe exactly the same memory, in which case this + * function is a no-op. No other overlaps are supported. + * + * Context: Any context + */ +void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct scatter_walk walk; - struct scatterlist tmp[2]; + unsigned int src_offset, dst_offset; - if (!nbytes) + if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */ return; - sg = scatterwalk_ffwd(tmp, sg, start); + src_offset = src->offset; + dst_offset = dst->offset; + for (;;) { + /* Compute the length to copy this step. */ + unsigned int len = min3(src->offset + src->length - src_offset, + dst->offset + dst->length - dst_offset, + nbytes); + struct page *src_page = sg_page(src); + struct page *dst_page = sg_page(dst); + const void *src_virt; + void *dst_virt; + + if (IS_ENABLED(CONFIG_HIGHMEM)) { + /* HIGHMEM: we may have to actually map the pages. */ + const unsigned int src_oip = offset_in_page(src_offset); + const unsigned int dst_oip = offset_in_page(dst_offset); + const unsigned int limit = PAGE_SIZE; + + /* Further limit len to not cross a page boundary. */ + len = min3(len, limit - src_oip, limit - dst_oip); + + /* Compute the source and destination pages. */ + src_page += src_offset / PAGE_SIZE; + dst_page += dst_offset / PAGE_SIZE; + + if (src_page != dst_page) { + /* Copy between different pages. */ + memcpy_page(dst_page, dst_oip, + src_page, src_oip, len); + flush_dcache_page(dst_page); + } else if (src_oip != dst_oip) { + /* Copy between different parts of same page. */ + dst_virt = kmap_local_page(dst_page); + memcpy(dst_virt + dst_oip, dst_virt + src_oip, + len); + kunmap_local(dst_virt); + flush_dcache_page(dst_page); + } /* Else, it's the same memory. No action needed. */ + } else { + /* + * !HIGHMEM: no mapping needed. Just work in the linear + * buffer of each sg entry. Note that we can cross page + * boundaries, as they are not significant in this case. + */ + src_virt = page_address(src_page) + src_offset; + dst_virt = page_address(dst_page) + dst_offset; + if (src_virt != dst_virt) { + memcpy(dst_virt, src_virt, len); + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) + __scatterwalk_flush_dcache_pages( + dst_page, dst_offset, len); + } /* Else, it's the same memory. No action needed. */ + } + nbytes -= len; + if (nbytes == 0) /* No more to copy? */ + break; - scatterwalk_start(&walk, sg); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); + /* + * There's more to copy. Advance the offsets by the length + * copied this step, and advance the sg entries as needed. 
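+ *
+ * Note that src and dst may advance at different rates: len is
+ * clamped by both entries each iteration, so e.g. one 4K source
+ * entry can be consumed against four 1K destination entries.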
+ */ + src_offset += len; + if (src_offset >= src->offset + src->length) { + src = sg_next(src); + src_offset = src->offset; + } + dst_offset += len; + if (dst_offset >= dst->offset + dst->length) { + dst = sg_next(dst); + dst_offset = dst->offset; + } + } } -EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); +EXPORT_SYMBOL_GPL(memcpy_sglist); struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, diff --git a/crypto/scompress.c b/crypto/scompress.c index 738f4f8f0f41..1a7ed8ae65b0 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -6,28 +6,31 @@ * Copyright (c) 2016, Intel Corporation * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> */ -#include <linux/errno.h> + +#include <crypto/internal/scompress.h> +#include <crypto/scatterwalk.h> +#include <linux/cpumask.h> +#include <linux/cryptouser.h> +#include <linux/err.h> +#include <linux/highmem.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/overflow.h> +#include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/crypto.h> -#include <linux/compiler.h> -#include <linux/vmalloc.h> -#include <crypto/algapi.h> -#include <linux/cryptouser.h> +#include <linux/workqueue.h> #include <net/netlink.h> -#include <linux/scatterlist.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/acompress.h> -#include <crypto/internal/scompress.h> -#include "internal.h" + +#include "compress.h" struct scomp_scratch { spinlock_t lock; - void *src; - void *dst; + union { + void *src; + unsigned long saddr; + }; }; static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = { @@ -38,8 +41,12 @@ static const struct crypto_type crypto_scomp_type; static int scomp_scratch_users; static DEFINE_MUTEX(scomp_lock); -#ifdef CONFIG_NET -static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) +static cpumask_t scomp_scratch_want; +static void scomp_scratch_workfn(struct work_struct *work); +static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn); + +static int __maybe_unused crypto_scomp_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rscomp; @@ -50,12 +57,6 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rscomp), &rscomp); } -#else -static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -73,91 +74,206 @@ static void crypto_scomp_free_scratches(void) for_each_possible_cpu(i) { scratch = per_cpu_ptr(&scomp_scratch, i); - vfree(scratch->src); - vfree(scratch->dst); + free_page(scratch->saddr); scratch->src = NULL; - scratch->dst = NULL; } } -static int crypto_scomp_alloc_scratches(void) +static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu) { - struct scomp_scratch *scratch; - int i; + int node = cpu_to_node(cpu); + struct page *page; + + page = alloc_pages_node(node, GFP_KERNEL, 0); + if (!page) + return -ENOMEM; + spin_lock_bh(&scratch->lock); + scratch->src = page_address(page); + spin_unlock_bh(&scratch->lock); + return 0; +} - for_each_possible_cpu(i) { - void *mem; +static void scomp_scratch_workfn(struct work_struct *work) +{ + int cpu; - scratch = per_cpu_ptr(&scomp_scratch, i); + for_each_cpu(cpu, &scomp_scratch_want) { + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, cpu); + if (scratch->src) + 
continue; + if (scomp_alloc_scratch(scratch, cpu)) + break; - mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!mem) - goto error; - scratch->src = mem; - mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!mem) - goto error; - scratch->dst = mem; + cpumask_clear_cpu(cpu, &scomp_scratch_want); } - return 0; -error: - crypto_scomp_free_scratches(); - return -ENOMEM; +} + +static int crypto_scomp_alloc_scratches(void) +{ + unsigned int i = cpumask_first(cpu_possible_mask); + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, i); + return scomp_alloc_scratch(scratch, i); } static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) { + struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm)); int ret = 0; mutex_lock(&scomp_lock); - if (!scomp_scratch_users++) + ret = crypto_acomp_alloc_streams(&alg->streams); + if (ret) + goto unlock; + if (!scomp_scratch_users++) { ret = crypto_scomp_alloc_scratches(); + if (ret) + scomp_scratch_users--; + } +unlock: mutex_unlock(&scomp_lock); return ret; } +static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch) +{ + int cpu = raw_smp_processor_id(); + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, cpu); + spin_lock(&scratch->lock); + if (likely(scratch->src)) + return scratch; + spin_unlock(&scratch->lock); + + cpumask_set_cpu(cpu, &scomp_scratch_want); + schedule_work(&scomp_scratch_work); + + scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask)); + spin_lock(&scratch->lock); + return scratch; +} + +static inline void scomp_unlock_scratch(struct scomp_scratch *scratch) + __releases(scratch) +{ + spin_unlock(&scratch->lock); +} + static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - void **tfm_ctx = acomp_tfm_ctx(tfm); + struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm); + bool src_isvirt = acomp_request_src_isvirt(req); + bool dst_isvirt = acomp_request_dst_isvirt(req); struct crypto_scomp *scomp = *tfm_ctx; - void **ctx = acomp_request_ctx(req); + struct crypto_acomp_stream *stream; struct scomp_scratch *scratch; + unsigned int slen = req->slen; + unsigned int dlen = req->dlen; + struct page *spage, *dpage; + unsigned int n; + const u8 *src; + size_t soff; + size_t doff; + u8 *dst; int ret; - if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) + if (!req->src || !slen) return -EINVAL; - if (req->dst && !req->dlen) + if (!req->dst || !dlen) return -EINVAL; - if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) - req->dlen = SCOMP_SCRATCH_SIZE; + if (dst_isvirt) + dst = req->dvirt; + else { + if (dlen <= req->dst->length) { + dpage = sg_page(req->dst); + doff = req->dst->offset; + } else + return -ENOSYS; + + dpage += doff / PAGE_SIZE; + doff = offset_in_page(doff); + + n = (dlen - 1) / PAGE_SIZE; + n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE; + if (PageHighMem(dpage + n) && + size_add(doff, dlen) > PAGE_SIZE) + return -ENOSYS; + dst = kmap_local_page(dpage) + doff; + } - scratch = raw_cpu_ptr(&scomp_scratch); - spin_lock(&scratch->lock); + if (src_isvirt) + src = req->svirt; + else { + src = NULL; + do { + if (slen <= req->src->length) { + spage = sg_page(req->src); + soff = req->src->offset; + } else + break; + + spage = spage + soff / PAGE_SIZE; + soff = offset_in_page(soff); + + n = (slen - 1) / PAGE_SIZE; + n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE; + if (PageHighMem(spage + n) && + size_add(soff, slen) > PAGE_SIZE) + break; + src = 
kmap_local_page(spage) + soff; + } while (0); + } + + stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams); - scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0); - if (dir) - ret = crypto_scomp_compress(scomp, scratch->src, req->slen, - scratch->dst, &req->dlen, *ctx); + if (!src_isvirt && !src) { + const u8 *src; + + scratch = scomp_lock_scratch(); + src = scratch->src; + memcpy_from_sglist(scratch->src, req->src, 0, slen); + + if (dir) + ret = crypto_scomp_compress(scomp, src, slen, + dst, &dlen, stream->ctx); + else + ret = crypto_scomp_decompress(scomp, src, slen, + dst, &dlen, stream->ctx); + + scomp_unlock_scratch(scratch); + } else if (dir) + ret = crypto_scomp_compress(scomp, src, slen, + dst, &dlen, stream->ctx); else - ret = crypto_scomp_decompress(scomp, scratch->src, req->slen, - scratch->dst, &req->dlen, *ctx); - if (!ret) { - if (!req->dst) { - req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); - if (!req->dst) { - ret = -ENOMEM; - goto out; - } + ret = crypto_scomp_decompress(scomp, src, slen, + dst, &dlen, stream->ctx); + + crypto_acomp_unlock_stream_bh(stream); + + req->dlen = dlen; + + if (!src_isvirt && src) + kunmap_local(src); + if (!dst_isvirt) { + kunmap_local(dst); + dlen += doff; + for (;;) { + flush_dcache_page(dpage); + if (dlen <= PAGE_SIZE) + break; + dlen -= PAGE_SIZE; + dpage++; } - scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen, - 1); } -out: - spin_unlock(&scratch->lock); + return ret; } @@ -177,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) crypto_free_scomp(*ctx); + flush_work(&scomp_scratch_work); mutex_lock(&scomp_lock); if (!--scomp_scratch_users) crypto_scomp_free_scratches(); @@ -204,62 +321,50 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) crt->compress = scomp_acomp_compress; crt->decompress = scomp_acomp_decompress; - crt->dst_free = sgl_free; - crt->reqsize = sizeof(void *); return 0; } -struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) +static void crypto_scomp_destroy(struct crypto_alg *alg) { - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void *ctx; - - ctx = crypto_scomp_alloc_ctx(scomp); - if (IS_ERR(ctx)) { - kfree(req); - return NULL; - } - - *req->__ctx = ctx; + struct scomp_alg *scomp = __crypto_scomp_alg(alg); - return req; -} - -void crypto_acomp_scomp_free_ctx(struct acomp_req *req) -{ - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void *ctx = *req->__ctx; - - if (ctx) - crypto_scomp_free_ctx(scomp, ctx); + crypto_acomp_free_streams(&scomp->streams); } static const struct crypto_type crypto_scomp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_scomp_init_tfm, + .destroy = crypto_scomp_destroy, #ifdef CONFIG_PROC_FS .show = crypto_scomp_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_scomp_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SCOMPRESS, .tfmsize = offsetof(struct crypto_scomp, base), + .algsize = offsetof(struct scomp_alg, base), }; +static void scomp_prepare_alg(struct scomp_alg *alg) +{ + struct crypto_alg *base = &alg->calg.base; + + comp_prepare_alg(&alg->calg); + + base->cra_flags |= CRYPTO_ALG_REQ_VIRT; +} + 
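For reference, the registration path below is what a converted driver now targets. A minimal sketch of such a driver — illustrative only, with hypothetical my_lib_*() calls standing in for a real backend, and assuming the stream-callback signatures implied by crypto_scomp_init_tfm()/crypto_scomp_destroy() above (alloc_ctx() takes no arguments and returns the context or an ERR_PTR; free_ctx() takes only the context):

#include <crypto/internal/scompress.h>
#include <linux/err.h>
#include <linux/module.h>

/* Hypothetical backend; stand-ins for a real compression library. */
void *my_lib_alloc_wmem(void);
void my_lib_free_wmem(void *wmem);
int my_lib_compress(const u8 *src, unsigned int slen,
		    u8 *dst, unsigned int *dlen, void *wmem);
int my_lib_decompress(const u8 *src, unsigned int slen,
		      u8 *dst, unsigned int *dlen);

static void *my_alloc_ctx(void)
{
	void *wmem = my_lib_alloc_wmem();	/* one context per stream */

	return wmem ? wmem : ERR_PTR(-ENOMEM);
}

static void my_free_ctx(void *ctx)
{
	my_lib_free_wmem(ctx);
}

static int my_compress(struct crypto_scomp *tfm, const u8 *src,
		       unsigned int slen, u8 *dst, unsigned int *dlen,
		       void *ctx)
{
	return my_lib_compress(src, slen, dst, dlen, ctx);
}

static int my_decompress(struct crypto_scomp *tfm, const u8 *src,
			 unsigned int slen, u8 *dst, unsigned int *dlen,
			 void *ctx)
{
	return my_lib_decompress(src, slen, dst, dlen);
}

static struct scomp_alg my_alg = {
	.streams.alloc_ctx	= my_alloc_ctx,
	.streams.free_ctx	= my_free_ctx,
	.compress		= my_compress,
	.decompress		= my_decompress,
	.base.cra_name		= "mycomp",
	.base.cra_driver_name	= "mycomp-generic",
	.base.cra_module	= THIS_MODULE,
};

static int __init my_scomp_init(void)
{
	return crypto_register_scomp(&my_alg);
}
module_init(my_scomp_init);

static void __exit my_scomp_exit(void)
{
	crypto_unregister_scomp(&my_alg);
}
module_exit(my_scomp_exit);

MODULE_LICENSE("GPL");

The core then multiplexes acomp requests onto these per-stream contexts via crypto_acomp_lock_stream_bh(), as scomp_acomp_comp_decomp() shows above.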
int crypto_register_scomp(struct scomp_alg *alg) { - struct crypto_alg *base = &alg->base; + struct crypto_alg *base = &alg->calg.base; + + scomp_prepare_alg(alg); base->cra_type = &crypto_scomp_type; - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; return crypto_register_alg(base); diff --git a/crypto/seed.c b/crypto/seed.c index 27720140820e..815391f213de 100644 --- a/crypto/seed.c +++ b/crypto/seed.c @@ -8,12 +8,12 @@ * Copyright (C) 2007 Korea Information Security Agency (KISA). */ +#include <crypto/algapi.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #define SEED_NUM_KCONSTANTS 16 #define SEED_KEY_SIZE 16 @@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, { struct seed_ctx *ctx = crypto_tfm_ctx(tfm); u32 *keyout = ctx->keysched; - const __be32 *key = (const __be32 *)in_key; u32 i, t0, t1, x1, x2, x3, x4; - x1 = be32_to_cpu(key[0]); - x2 = be32_to_cpu(key[1]); - x3 = be32_to_cpu(key[2]); - x4 = be32_to_cpu(key[3]); + x1 = get_unaligned_be32(&in_key[0]); + x2 = get_unaligned_be32(&in_key[4]); + x3 = get_unaligned_be32(&in_key[8]); + x4 = get_unaligned_be32(&in_key[12]); for (i = 0; i < SEED_NUM_KCONSTANTS; i++) { t0 = x1 + x3 - KC[i]; @@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 x1, x2, x3, x4, t0, t1; const u32 *ks = ctx->keysched; - x1 = be32_to_cpu(src[0]); - x2 = be32_to_cpu(src[1]); - x3 = be32_to_cpu(src[2]); - x4 = be32_to_cpu(src[3]); + x1 = get_unaligned_be32(&in[0]); + x2 = get_unaligned_be32(&in[4]); + x3 = get_unaligned_be32(&in[8]); + x4 = get_unaligned_be32(&in[12]); OP(x1, x2, x3, x4, 0); OP(x3, x4, x1, x2, 2); @@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) OP(x1, x2, x3, x4, 28); OP(x3, x4, x1, x2, 30); - dst[0] = cpu_to_be32(x3); - dst[1] = cpu_to_be32(x4); - dst[2] = cpu_to_be32(x1); - dst[3] = cpu_to_be32(x2); + put_unaligned_be32(x3, &out[0]); + put_unaligned_be32(x4, &out[4]); + put_unaligned_be32(x1, &out[8]); + put_unaligned_be32(x2, &out[12]); } /* decrypt a block of text */ @@ -402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 x1, x2, x3, x4, t0, t1; const u32 *ks = ctx->keysched; - x1 = be32_to_cpu(src[0]); - x2 = be32_to_cpu(src[1]); - x3 = be32_to_cpu(src[2]); - x4 = be32_to_cpu(src[3]); + x1 = get_unaligned_be32(&in[0]); + x2 = get_unaligned_be32(&in[4]); + x3 = get_unaligned_be32(&in[8]); + x4 = get_unaligned_be32(&in[12]); OP(x1, x2, x3, x4, 30); OP(x3, x4, x1, x2, 28); @@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) OP(x1, x2, x3, x4, 2); OP(x3, x4, x1, x2, 0); - dst[0] = cpu_to_be32(x3); - dst[1] = cpu_to_be32(x4); - dst[2] = cpu_to_be32(x1); - dst[3] = cpu_to_be32(x2); + put_unaligned_be32(x3, &out[0]); + put_unaligned_be32(x4, &out[4]); + put_unaligned_be32(x1, &out[8]); + put_unaligned_be32(x2, &out[12]); } @@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = { 
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = SEED_BLOCK_SIZE, .cra_ctxsize = sizeof(struct seed_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { @@ -466,7 +460,7 @@ static void __exit seed_fini(void) crypto_unregister_alg(&seed_alg); } -subsys_initcall(seed_init); +module_init(seed_init); module_exit(seed_fini); MODULE_DESCRIPTION("SEED Cipher Algorithm"); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 0899d527c284..2bae99e33526 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) struct aead_request *subreq = aead_request_ctx(req); struct crypto_aead *geniv; - if (err == -EINPROGRESS) + if (err == -EINPROGRESS || err == -EBUSY) return; if (err) @@ -36,10 +36,9 @@ out: kfree_sensitive(subreq->iv); } -static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, - int err) +static void seqiv_aead_encrypt_complete(void *data, int err) { - struct aead_request *req = base->data; + struct aead_request *req = data; seqiv_aead_encrypt_complete2(req, err); aead_request_complete(req, err); @@ -65,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req) data = req->base.data; info = req->iv; - if (req->src != req->dst) { - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - - skcipher_request_set_sync_tfm(nreq, ctx->sknull); - skcipher_request_set_callback(nreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, - req->assoclen + req->cryptlen, - NULL); - - err = crypto_skcipher_encrypt(nreq); - if (err) - return err; - } + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, + req->assoclen + req->cryptlen); if (unlikely(!IS_ALIGNED((unsigned long)info, crypto_aead_alignmask(geniv) + 1))) { @@ -180,7 +168,7 @@ static void __exit seqiv_module_exit(void) crypto_unregister_template(&seqiv_tmpl); } -subsys_initcall(seqiv_module_init); +module_init(seqiv_module_init); module_exit(seqiv_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 45f98b750053..b21e7606c652 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -7,11 +7,11 @@ * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no> */ +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> -#include <asm/unaligned.h> -#include <linux/crypto.h> +#include <linux/unaligned.h> #include <linux/types.h> #include <crypto/serpent.h> @@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void) crypto_unregister_alg(&srp_alg); } -subsys_initcall(serpent_mod_init); +module_init(serpent_mod_init); module_exit(serpent_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha1.c b/crypto/sha1.c new file mode 100644 index 000000000000..4fbf61cf0370 --- /dev/null +++ b/crypto/sha1.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API support for SHA-1 and HMAC-SHA1 + * + * Copyright (c) Alan Smithee. + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> + * Copyright 2025 Google LLC + */ +#include <crypto/internal/hash.h> +#include <crypto/sha1.h> +#include <linux/kernel.h> +#include <linux/module.h> + +/* + * Export and import functions. crypto_shash wants a particular format that + * matches that used by some legacy drivers. 
It currently is the same as the + * library SHA context, except the value in bytecount must be block-aligned and + * the remainder must be stored in an extra u8 appended to the struct. + */ + +#define SHA1_SHASH_STATE_SIZE (sizeof(struct sha1_ctx) + 1) +static_assert(sizeof(struct sha1_ctx) == sizeof(struct sha1_state)); +static_assert(offsetof(struct sha1_ctx, state) == offsetof(struct sha1_state, state)); +static_assert(offsetof(struct sha1_ctx, bytecount) == offsetof(struct sha1_state, count)); +static_assert(offsetof(struct sha1_ctx, buf) == offsetof(struct sha1_state, buffer)); + +static int __crypto_sha1_export(const struct sha1_ctx *ctx0, void *out) +{ + struct sha1_ctx ctx = *ctx0; + unsigned int partial; + u8 *p = out; + + partial = ctx.bytecount % SHA1_BLOCK_SIZE; + ctx.bytecount -= partial; + memcpy(p, &ctx, sizeof(ctx)); + p += sizeof(ctx); + *p = partial; + return 0; +} + +static int __crypto_sha1_import(struct sha1_ctx *ctx, const void *in) +{ + const u8 *p = in; + + memcpy(ctx, p, sizeof(*ctx)); + p += sizeof(*ctx); + ctx->bytecount += *p; + return 0; +} + +static int __crypto_sha1_export_core(const struct sha1_ctx *ctx, void *out) +{ + memcpy(out, ctx, offsetof(struct sha1_ctx, buf)); + return 0; +} + +static int __crypto_sha1_import_core(struct sha1_ctx *ctx, const void *in) +{ + memcpy(ctx, in, offsetof(struct sha1_ctx, buf)); + return 0; +} + +const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 +}; +EXPORT_SYMBOL_GPL(sha1_zero_message_hash); + +#define SHA1_CTX(desc) ((struct sha1_ctx *)shash_desc_ctx(desc)) + +static int crypto_sha1_init(struct shash_desc *desc) +{ + sha1_init(SHA1_CTX(desc)); + return 0; +} + +static int crypto_sha1_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + sha1_update(SHA1_CTX(desc), data, len); + return 0; +} + +static int crypto_sha1_final(struct shash_desc *desc, u8 *out) +{ + sha1_final(SHA1_CTX(desc), out); + return 0; +} + +static int crypto_sha1_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha1(data, len, out); + return 0; +} + +static int crypto_sha1_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha1_export(SHA1_CTX(desc), out); +} + +static int crypto_sha1_import(struct shash_desc *desc, const void *in) +{ + return __crypto_sha1_import(SHA1_CTX(desc), in); +} + +static int crypto_sha1_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha1_export_core(SHA1_CTX(desc), out); +} + +static int crypto_sha1_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_sha1_import_core(SHA1_CTX(desc), in); +} + +#define HMAC_SHA1_KEY(tfm) ((struct hmac_sha1_key *)crypto_shash_ctx(tfm)) +#define HMAC_SHA1_CTX(desc) ((struct hmac_sha1_ctx *)shash_desc_ctx(desc)) + +static int crypto_hmac_sha1_setkey(struct crypto_shash *tfm, + const u8 *raw_key, unsigned int keylen) +{ + hmac_sha1_preparekey(HMAC_SHA1_KEY(tfm), raw_key, keylen); + return 0; +} + +static int crypto_hmac_sha1_init(struct shash_desc *desc) +{ + hmac_sha1_init(HMAC_SHA1_CTX(desc), HMAC_SHA1_KEY(desc->tfm)); + return 0; +} + +static int crypto_hmac_sha1_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + hmac_sha1_update(HMAC_SHA1_CTX(desc), data, len); + return 0; +} + +static int crypto_hmac_sha1_final(struct shash_desc *desc, u8 *out) +{ + hmac_sha1_final(HMAC_SHA1_CTX(desc), out); + return 0; +} + +static int 
crypto_hmac_sha1_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + hmac_sha1(HMAC_SHA1_KEY(desc->tfm), data, len, out); + return 0; +} + +static int crypto_hmac_sha1_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha1_export(&HMAC_SHA1_CTX(desc)->sha_ctx, out); +} + +static int crypto_hmac_sha1_import(struct shash_desc *desc, const void *in) +{ + struct hmac_sha1_ctx *ctx = HMAC_SHA1_CTX(desc); + + ctx->ostate = HMAC_SHA1_KEY(desc->tfm)->ostate; + return __crypto_sha1_import(&ctx->sha_ctx, in); +} + +static int crypto_hmac_sha1_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha1_export_core(&HMAC_SHA1_CTX(desc)->sha_ctx, out); +} + +static int crypto_hmac_sha1_import_core(struct shash_desc *desc, const void *in) +{ + struct hmac_sha1_ctx *ctx = HMAC_SHA1_CTX(desc); + + ctx->ostate = HMAC_SHA1_KEY(desc->tfm)->ostate; + return __crypto_sha1_import_core(&ctx->sha_ctx, in); +} + +static struct shash_alg algs[] = { + { + .base.cra_name = "sha1", + .base.cra_driver_name = "sha1-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA1_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA1_DIGEST_SIZE, + .init = crypto_sha1_init, + .update = crypto_sha1_update, + .final = crypto_sha1_final, + .digest = crypto_sha1_digest, + .export = crypto_sha1_export, + .import = crypto_sha1_import, + .export_core = crypto_sha1_export_core, + .import_core = crypto_sha1_import_core, + .descsize = sizeof(struct sha1_ctx), + .statesize = SHA1_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "hmac(sha1)", + .base.cra_driver_name = "hmac-sha1-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA1_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct hmac_sha1_key), + .base.cra_module = THIS_MODULE, + .digestsize = SHA1_DIGEST_SIZE, + .setkey = crypto_hmac_sha1_setkey, + .init = crypto_hmac_sha1_init, + .update = crypto_hmac_sha1_update, + .final = crypto_hmac_sha1_final, + .digest = crypto_hmac_sha1_digest, + .export = crypto_hmac_sha1_export, + .import = crypto_hmac_sha1_import, + .export_core = crypto_hmac_sha1_export_core, + .import_core = crypto_hmac_sha1_import_core, + .descsize = sizeof(struct hmac_sha1_ctx), + .statesize = SHA1_SHASH_STATE_SIZE, + }, +}; + +static int __init crypto_sha1_mod_init(void) +{ + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} +module_init(crypto_sha1_mod_init); + +static void __exit crypto_sha1_mod_exit(void) +{ + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); +} +module_exit(crypto_sha1_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API support for SHA-1 and HMAC-SHA1"); + +MODULE_ALIAS_CRYPTO("sha1"); +MODULE_ALIAS_CRYPTO("sha1-lib"); +MODULE_ALIAS_CRYPTO("hmac(sha1)"); +MODULE_ALIAS_CRYPTO("hmac-sha1-lib"); diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c deleted file mode 100644 index 325b57fe28dc..000000000000 --- a/crypto/sha1_generic.c +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * SHA1 Secure Hash Algorithm. - * - * Derived from cryptoapi implementation, adapted for in-place - * scatterlist interface. - * - * Copyright (c) Alan Smithee. 
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> - */ -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha1.h> -#include <crypto/sha1_base.h> -#include <asm/byteorder.h> - -const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, - 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, - 0xaf, 0xd8, 0x07, 0x09 -}; -EXPORT_SYMBOL_GPL(sha1_zero_message_hash); - -static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, - int blocks) -{ - u32 temp[SHA1_WORKSPACE_WORDS]; - - while (blocks--) { - sha1_transform(sst->state, src, temp); - src += SHA1_BLOCK_SIZE; - } - memzero_explicit(temp, sizeof(temp)); -} - -int crypto_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return sha1_base_do_update(desc, data, len, sha1_generic_block_fn); -} -EXPORT_SYMBOL(crypto_sha1_update); - -static int sha1_final(struct shash_desc *desc, u8 *out) -{ - sha1_base_do_finalize(desc, sha1_generic_block_fn); - return sha1_base_finish(desc, out); -} - -int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha1_base_do_update(desc, data, len, sha1_generic_block_fn); - return sha1_final(desc, out); -} -EXPORT_SYMBOL(crypto_sha1_finup); - -static struct shash_alg alg = { - .digestsize = SHA1_DIGEST_SIZE, - .init = sha1_base_init, - .update = crypto_sha1_update, - .final = sha1_final, - .finup = crypto_sha1_finup, - .descsize = sizeof(struct sha1_state), - .base = { - .cra_name = "sha1", - .cra_driver_name= "sha1-generic", - .cra_priority = 100, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init sha1_generic_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit sha1_generic_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(sha1_generic_mod_init); -module_exit(sha1_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); - -MODULE_ALIAS_CRYPTO("sha1"); -MODULE_ALIAS_CRYPTO("sha1-generic"); diff --git a/crypto/sha256.c b/crypto/sha256.c new file mode 100644 index 000000000000..fb81defe084c --- /dev/null +++ b/crypto/sha256.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API support for SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> + * Copyright 2025 Google LLC + */ +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +/* + * Export and import functions. crypto_shash wants a particular format that + * matches that used by some legacy drivers. It currently is the same as the + * library SHA context, except the value in bytecount must be block-aligned and + * the remainder must be stored in an extra u8 appended to the struct. 
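+ *
+ * Given the static_asserts below, the resulting 105-byte state is
+ * laid out as:
+ *   bytes   0..31   state (eight 32-bit words)
+ *   bytes  32..39   bytecount, rounded down to a multiple of
+ *                   SHA256_BLOCK_SIZE (64)
+ *   bytes  40..103  buf (the partial block)
+ *   byte   104      number of valid bytes in buf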
+ */ + +#define SHA256_SHASH_STATE_SIZE 105 +static_assert(offsetof(struct __sha256_ctx, state) == 0); +static_assert(offsetof(struct __sha256_ctx, bytecount) == 32); +static_assert(offsetof(struct __sha256_ctx, buf) == 40); +static_assert(sizeof(struct __sha256_ctx) + 1 == SHA256_SHASH_STATE_SIZE); + +static int __crypto_sha256_export(const struct __sha256_ctx *ctx0, void *out) +{ + struct __sha256_ctx ctx = *ctx0; + unsigned int partial; + u8 *p = out; + + partial = ctx.bytecount % SHA256_BLOCK_SIZE; + ctx.bytecount -= partial; + memcpy(p, &ctx, sizeof(ctx)); + p += sizeof(ctx); + *p = partial; + return 0; +} + +static int __crypto_sha256_import(struct __sha256_ctx *ctx, const void *in) +{ + const u8 *p = in; + + memcpy(ctx, p, sizeof(*ctx)); + p += sizeof(*ctx); + ctx->bytecount += *p; + return 0; +} + +static int __crypto_sha256_export_core(const struct __sha256_ctx *ctx, + void *out) +{ + memcpy(out, ctx, offsetof(struct __sha256_ctx, buf)); + return 0; +} + +static int __crypto_sha256_import_core(struct __sha256_ctx *ctx, const void *in) +{ + memcpy(ctx, in, offsetof(struct __sha256_ctx, buf)); + return 0; +} + +/* SHA-224 */ + +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +EXPORT_SYMBOL_GPL(sha224_zero_message_hash); + +#define SHA224_CTX(desc) ((struct sha224_ctx *)shash_desc_ctx(desc)) + +static int crypto_sha224_init(struct shash_desc *desc) +{ + sha224_init(SHA224_CTX(desc)); + return 0; +} + +static int crypto_sha224_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + sha224_update(SHA224_CTX(desc), data, len); + return 0; +} + +static int crypto_sha224_final(struct shash_desc *desc, u8 *out) +{ + sha224_final(SHA224_CTX(desc), out); + return 0; +} + +static int crypto_sha224_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha224(data, len, out); + return 0; +} + +static int crypto_sha224_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export(&SHA224_CTX(desc)->ctx, out); +} + +static int crypto_sha224_import(struct shash_desc *desc, const void *in) +{ + return __crypto_sha256_import(&SHA224_CTX(desc)->ctx, in); +} + +static int crypto_sha224_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export_core(&SHA224_CTX(desc)->ctx, out); +} + +static int crypto_sha224_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_sha256_import_core(&SHA224_CTX(desc)->ctx, in); +} + +/* SHA-256 */ + +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +EXPORT_SYMBOL_GPL(sha256_zero_message_hash); + +#define SHA256_CTX(desc) ((struct sha256_ctx *)shash_desc_ctx(desc)) + +static int crypto_sha256_init(struct shash_desc *desc) +{ + sha256_init(SHA256_CTX(desc)); + return 0; +} + +static int crypto_sha256_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + sha256_update(SHA256_CTX(desc), data, len); + return 0; +} + +static int crypto_sha256_final(struct shash_desc *desc, u8 *out) +{ + sha256_final(SHA256_CTX(desc), out); + return 0; +} + +static int crypto_sha256_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha256(data, 
len, out); + return 0; +} + +static int crypto_sha256_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export(&SHA256_CTX(desc)->ctx, out); +} + +static int crypto_sha256_import(struct shash_desc *desc, const void *in) +{ + return __crypto_sha256_import(&SHA256_CTX(desc)->ctx, in); +} + +static int crypto_sha256_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export_core(&SHA256_CTX(desc)->ctx, out); +} + +static int crypto_sha256_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_sha256_import_core(&SHA256_CTX(desc)->ctx, in); +} + +/* HMAC-SHA224 */ + +#define HMAC_SHA224_KEY(tfm) ((struct hmac_sha224_key *)crypto_shash_ctx(tfm)) +#define HMAC_SHA224_CTX(desc) ((struct hmac_sha224_ctx *)shash_desc_ctx(desc)) + +static int crypto_hmac_sha224_setkey(struct crypto_shash *tfm, + const u8 *raw_key, unsigned int keylen) +{ + hmac_sha224_preparekey(HMAC_SHA224_KEY(tfm), raw_key, keylen); + return 0; +} + +static int crypto_hmac_sha224_init(struct shash_desc *desc) +{ + hmac_sha224_init(HMAC_SHA224_CTX(desc), HMAC_SHA224_KEY(desc->tfm)); + return 0; +} + +static int crypto_hmac_sha224_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + hmac_sha224_update(HMAC_SHA224_CTX(desc), data, len); + return 0; +} + +static int crypto_hmac_sha224_final(struct shash_desc *desc, u8 *out) +{ + hmac_sha224_final(HMAC_SHA224_CTX(desc), out); + return 0; +} + +static int crypto_hmac_sha224_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, + u8 *out) +{ + hmac_sha224(HMAC_SHA224_KEY(desc->tfm), data, len, out); + return 0; +} + +static int crypto_hmac_sha224_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export(&HMAC_SHA224_CTX(desc)->ctx.sha_ctx, out); +} + +static int crypto_hmac_sha224_import(struct shash_desc *desc, const void *in) +{ + struct hmac_sha224_ctx *ctx = HMAC_SHA224_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA224_KEY(desc->tfm)->key.ostate; + return __crypto_sha256_import(&ctx->ctx.sha_ctx, in); +} + +static int crypto_hmac_sha224_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export_core(&HMAC_SHA224_CTX(desc)->ctx.sha_ctx, + out); +} + +static int crypto_hmac_sha224_import_core(struct shash_desc *desc, + const void *in) +{ + struct hmac_sha224_ctx *ctx = HMAC_SHA224_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA224_KEY(desc->tfm)->key.ostate; + return __crypto_sha256_import_core(&ctx->ctx.sha_ctx, in); +} + +/* HMAC-SHA256 */ + +#define HMAC_SHA256_KEY(tfm) ((struct hmac_sha256_key *)crypto_shash_ctx(tfm)) +#define HMAC_SHA256_CTX(desc) ((struct hmac_sha256_ctx *)shash_desc_ctx(desc)) + +static int crypto_hmac_sha256_setkey(struct crypto_shash *tfm, + const u8 *raw_key, unsigned int keylen) +{ + hmac_sha256_preparekey(HMAC_SHA256_KEY(tfm), raw_key, keylen); + return 0; +} + +static int crypto_hmac_sha256_init(struct shash_desc *desc) +{ + hmac_sha256_init(HMAC_SHA256_CTX(desc), HMAC_SHA256_KEY(desc->tfm)); + return 0; +} + +static int crypto_hmac_sha256_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + hmac_sha256_update(HMAC_SHA256_CTX(desc), data, len); + return 0; +} + +static int crypto_hmac_sha256_final(struct shash_desc *desc, u8 *out) +{ + hmac_sha256_final(HMAC_SHA256_CTX(desc), out); + return 0; +} + +static int crypto_hmac_sha256_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, + u8 *out) +{ + hmac_sha256(HMAC_SHA256_KEY(desc->tfm), data, len, out); + return 0; +} + +static int 
crypto_hmac_sha256_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export(&HMAC_SHA256_CTX(desc)->ctx.sha_ctx, out); +} + +static int crypto_hmac_sha256_import(struct shash_desc *desc, const void *in) +{ + struct hmac_sha256_ctx *ctx = HMAC_SHA256_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA256_KEY(desc->tfm)->key.ostate; + return __crypto_sha256_import(&ctx->ctx.sha_ctx, in); +} + +static int crypto_hmac_sha256_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha256_export_core(&HMAC_SHA256_CTX(desc)->ctx.sha_ctx, + out); +} + +static int crypto_hmac_sha256_import_core(struct shash_desc *desc, + const void *in) +{ + struct hmac_sha256_ctx *ctx = HMAC_SHA256_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA256_KEY(desc->tfm)->key.ostate; + return __crypto_sha256_import_core(&ctx->ctx.sha_ctx, in); +} + +/* Algorithm definitions */ + +static struct shash_alg algs[] = { + { + .base.cra_name = "sha224", + .base.cra_driver_name = "sha224-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha224_update, + .final = crypto_sha224_final, + .digest = crypto_sha224_digest, + .export = crypto_sha224_export, + .import = crypto_sha224_import, + .export_core = crypto_sha224_export_core, + .import_core = crypto_sha224_import_core, + .descsize = sizeof(struct sha224_ctx), + .statesize = SHA256_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update, + .final = crypto_sha256_final, + .digest = crypto_sha256_digest, + .export = crypto_sha256_export, + .import = crypto_sha256_import, + .export_core = crypto_sha256_export_core, + .import_core = crypto_sha256_import_core, + .descsize = sizeof(struct sha256_ctx), + .statesize = SHA256_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "hmac(sha224)", + .base.cra_driver_name = "hmac-sha224-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct hmac_sha224_key), + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .setkey = crypto_hmac_sha224_setkey, + .init = crypto_hmac_sha224_init, + .update = crypto_hmac_sha224_update, + .final = crypto_hmac_sha224_final, + .digest = crypto_hmac_sha224_digest, + .export = crypto_hmac_sha224_export, + .import = crypto_hmac_sha224_import, + .export_core = crypto_hmac_sha224_export_core, + .import_core = crypto_hmac_sha224_import_core, + .descsize = sizeof(struct hmac_sha224_ctx), + .statesize = SHA256_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "hmac(sha256)", + .base.cra_driver_name = "hmac-sha256-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct hmac_sha256_key), + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .setkey = crypto_hmac_sha256_setkey, + .init = crypto_hmac_sha256_init, + .update = crypto_hmac_sha256_update, + .final = crypto_hmac_sha256_final, + .digest = crypto_hmac_sha256_digest, + .export = crypto_hmac_sha256_export, + .import = crypto_hmac_sha256_import, + .export_core = crypto_hmac_sha256_export_core, + .import_core = crypto_hmac_sha256_import_core, + .descsize = sizeof(struct hmac_sha256_ctx), + .statesize = SHA256_SHASH_STATE_SIZE, + }, 
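+	/*
+	 * Note: all four entries share SHA256_SHASH_STATE_SIZE; the HMAC
+	 * algorithms export the same serialized inner-SHA-256 context as
+	 * the plain hashes.
+	 */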
+}; + +static int __init crypto_sha256_mod_init(void) +{ + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} +module_init(crypto_sha256_mod_init); + +static void __exit crypto_sha256_mod_exit(void) +{ + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); +} +module_exit(crypto_sha256_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API support for SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256"); + +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha224-lib"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha256-lib"); +MODULE_ALIAS_CRYPTO("hmac(sha224)"); +MODULE_ALIAS_CRYPTO("hmac-sha224-lib"); +MODULE_ALIAS_CRYPTO("hmac(sha256)"); +MODULE_ALIAS_CRYPTO("hmac-sha256-lib"); diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c deleted file mode 100644 index 3b377197236e..000000000000 --- a/crypto/sha256_generic.c +++ /dev/null @@ -1,122 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> - */ -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/byteorder.h> -#include <asm/unaligned.h> - -const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, - 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, - 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, - 0x2f -}; -EXPORT_SYMBOL_GPL(sha224_zero_message_hash); - -const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; -EXPORT_SYMBOL_GPL(sha256_zero_message_hash); - -static int crypto_sha256_init(struct shash_desc *desc) -{ - sha256_init(shash_desc_ctx(desc)); - return 0; -} - -static int crypto_sha224_init(struct shash_desc *desc) -{ - sha224_init(shash_desc_ctx(desc)); - return 0; -} - -int crypto_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - sha256_update(shash_desc_ctx(desc), data, len); - return 0; -} -EXPORT_SYMBOL(crypto_sha256_update); - -static int crypto_sha256_final(struct shash_desc *desc, u8 *out) -{ - if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) - sha224_final(shash_desc_ctx(desc), out); - else - sha256_final(shash_desc_ctx(desc), out); - return 0; -} - -int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash) -{ - sha256_update(shash_desc_ctx(desc), data, len); - return crypto_sha256_final(desc, hash); -} -EXPORT_SYMBOL(crypto_sha256_finup); - -static struct shash_alg sha256_algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = crypto_sha256_init, - .update = crypto_sha256_update, - .final = crypto_sha256_final, - .finup = crypto_sha256_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-generic", - .cra_priority = 100, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = crypto_sha224_init, - .update = 
crypto_sha256_update, - .final = crypto_sha256_final, - .finup = crypto_sha256_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-generic", - .cra_priority = 100, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha256_generic_mod_init(void) -{ - return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); -} - -static void __exit sha256_generic_mod_fini(void) -{ - crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); -} - -subsys_initcall(sha256_generic_mod_init); -module_exit(sha256_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha224-generic"); -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha256-generic"); diff --git a/crypto/sha3.c b/crypto/sha3.c new file mode 100644 index 000000000000..8f364979ec89 --- /dev/null +++ b/crypto/sha3.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API support for SHA-3 + * (https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) + */ +#include <crypto/internal/hash.h> +#include <crypto/sha3.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#define SHA3_CTX(desc) ((struct sha3_ctx *)shash_desc_ctx(desc)) + +static int crypto_sha3_224_init(struct shash_desc *desc) +{ + sha3_224_init(SHA3_CTX(desc)); + return 0; +} + +static int crypto_sha3_256_init(struct shash_desc *desc) +{ + sha3_256_init(SHA3_CTX(desc)); + return 0; +} + +static int crypto_sha3_384_init(struct shash_desc *desc) +{ + sha3_384_init(SHA3_CTX(desc)); + return 0; +} + +static int crypto_sha3_512_init(struct shash_desc *desc) +{ + sha3_512_init(SHA3_CTX(desc)); + return 0; +} + +static int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + sha3_update(SHA3_CTX(desc), data, len); + return 0; +} + +static int crypto_sha3_final(struct shash_desc *desc, u8 *out) +{ + sha3_final(SHA3_CTX(desc), out); + return 0; +} + +static int crypto_sha3_224_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha3_224(data, len, out); + return 0; +} + +static int crypto_sha3_256_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha3_256(data, len, out); + return 0; +} + +static int crypto_sha3_384_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha3_384(data, len, out); + return 0; +} + +static int crypto_sha3_512_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha3_512(data, len, out); + return 0; +} + +static int crypto_sha3_export_core(struct shash_desc *desc, void *out) +{ + memcpy(out, SHA3_CTX(desc), sizeof(struct sha3_ctx)); + return 0; +} + +static int crypto_sha3_import_core(struct shash_desc *desc, const void *in) +{ + memcpy(SHA3_CTX(desc), in, sizeof(struct sha3_ctx)); + return 0; +} + +static struct shash_alg algs[] = { { + .digestsize = SHA3_224_DIGEST_SIZE, + .init = crypto_sha3_224_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, + .digest = crypto_sha3_224_digest, + .export_core = crypto_sha3_export_core, + .import_core = crypto_sha3_import_core, + .descsize = sizeof(struct sha3_ctx), + .base.cra_name = "sha3-224", + .base.cra_driver_name = "sha3-224-lib", + .base.cra_blocksize = SHA3_224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = SHA3_256_DIGEST_SIZE, + .init = crypto_sha3_256_init, + 
.update = crypto_sha3_update, + .final = crypto_sha3_final, + .digest = crypto_sha3_256_digest, + .export_core = crypto_sha3_export_core, + .import_core = crypto_sha3_import_core, + .descsize = sizeof(struct sha3_ctx), + .base.cra_name = "sha3-256", + .base.cra_driver_name = "sha3-256-lib", + .base.cra_blocksize = SHA3_256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = SHA3_384_DIGEST_SIZE, + .init = crypto_sha3_384_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, + .digest = crypto_sha3_384_digest, + .export_core = crypto_sha3_export_core, + .import_core = crypto_sha3_import_core, + .descsize = sizeof(struct sha3_ctx), + .base.cra_name = "sha3-384", + .base.cra_driver_name = "sha3-384-lib", + .base.cra_blocksize = SHA3_384_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = SHA3_512_DIGEST_SIZE, + .init = crypto_sha3_512_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, + .digest = crypto_sha3_512_digest, + .export_core = crypto_sha3_export_core, + .import_core = crypto_sha3_import_core, + .descsize = sizeof(struct sha3_ctx), + .base.cra_name = "sha3-512", + .base.cra_driver_name = "sha3-512-lib", + .base.cra_blocksize = SHA3_512_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +} }; + +static int __init crypto_sha3_mod_init(void) +{ + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} +module_init(crypto_sha3_mod_init); + +static void __exit crypto_sha3_mod_exit(void) +{ + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); +} +module_exit(crypto_sha3_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API support for SHA-3"); + +MODULE_ALIAS_CRYPTO("sha3-224"); +MODULE_ALIAS_CRYPTO("sha3-224-lib"); +MODULE_ALIAS_CRYPTO("sha3-256"); +MODULE_ALIAS_CRYPTO("sha3-256-lib"); +MODULE_ALIAS_CRYPTO("sha3-384"); +MODULE_ALIAS_CRYPTO("sha3-384-lib"); +MODULE_ALIAS_CRYPTO("sha3-512"); +MODULE_ALIAS_CRYPTO("sha3-512-lib"); diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c deleted file mode 100644 index 3e4069935b53..000000000000 --- a/crypto/sha3_generic.c +++ /dev/null @@ -1,305 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * SHA-3, as specified in - * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf - * - * SHA-3 code by Jeff Garzik <jeff@garzik.org> - * Ard Biesheuvel <ard.biesheuvel@linaro.org> - */ -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/types.h> -#include <crypto/sha3.h> -#include <asm/unaligned.h> - -/* - * On some 32-bit architectures (h8300), GCC ends up using - * over 1 KB of stack if we inline the round calculation into the loop - * in keccakf(). On the other hand, on 64-bit architectures with plenty - * of [64-bit wide] general purpose registers, not inlining it severely - * hurts performance. So let's use 64-bitness as a heuristic to decide - * whether to inline or not. 
- */ -#ifdef CONFIG_64BIT -#define SHA3_INLINE inline -#else -#define SHA3_INLINE noinline -#endif - -#define KECCAK_ROUNDS 24 - -static const u64 keccakf_rndc[24] = { - 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, - 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, - 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL, - 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, - 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, - 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL, - 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL -}; - -/* update the state with given number of rounds */ - -static SHA3_INLINE void keccakf_round(u64 st[25]) -{ - u64 t[5], tt, bc[5]; - - /* Theta */ - bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; - bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; - bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; - bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; - bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; - - t[0] = bc[4] ^ rol64(bc[1], 1); - t[1] = bc[0] ^ rol64(bc[2], 1); - t[2] = bc[1] ^ rol64(bc[3], 1); - t[3] = bc[2] ^ rol64(bc[4], 1); - t[4] = bc[3] ^ rol64(bc[0], 1); - - st[0] ^= t[0]; - - /* Rho Pi */ - tt = st[1]; - st[ 1] = rol64(st[ 6] ^ t[1], 44); - st[ 6] = rol64(st[ 9] ^ t[4], 20); - st[ 9] = rol64(st[22] ^ t[2], 61); - st[22] = rol64(st[14] ^ t[4], 39); - st[14] = rol64(st[20] ^ t[0], 18); - st[20] = rol64(st[ 2] ^ t[2], 62); - st[ 2] = rol64(st[12] ^ t[2], 43); - st[12] = rol64(st[13] ^ t[3], 25); - st[13] = rol64(st[19] ^ t[4], 8); - st[19] = rol64(st[23] ^ t[3], 56); - st[23] = rol64(st[15] ^ t[0], 41); - st[15] = rol64(st[ 4] ^ t[4], 27); - st[ 4] = rol64(st[24] ^ t[4], 14); - st[24] = rol64(st[21] ^ t[1], 2); - st[21] = rol64(st[ 8] ^ t[3], 55); - st[ 8] = rol64(st[16] ^ t[1], 45); - st[16] = rol64(st[ 5] ^ t[0], 36); - st[ 5] = rol64(st[ 3] ^ t[3], 28); - st[ 3] = rol64(st[18] ^ t[3], 21); - st[18] = rol64(st[17] ^ t[2], 15); - st[17] = rol64(st[11] ^ t[1], 10); - st[11] = rol64(st[ 7] ^ t[2], 6); - st[ 7] = rol64(st[10] ^ t[0], 3); - st[10] = rol64( tt ^ t[1], 1); - - /* Chi */ - bc[ 0] = ~st[ 1] & st[ 2]; - bc[ 1] = ~st[ 2] & st[ 3]; - bc[ 2] = ~st[ 3] & st[ 4]; - bc[ 3] = ~st[ 4] & st[ 0]; - bc[ 4] = ~st[ 0] & st[ 1]; - st[ 0] ^= bc[ 0]; - st[ 1] ^= bc[ 1]; - st[ 2] ^= bc[ 2]; - st[ 3] ^= bc[ 3]; - st[ 4] ^= bc[ 4]; - - bc[ 0] = ~st[ 6] & st[ 7]; - bc[ 1] = ~st[ 7] & st[ 8]; - bc[ 2] = ~st[ 8] & st[ 9]; - bc[ 3] = ~st[ 9] & st[ 5]; - bc[ 4] = ~st[ 5] & st[ 6]; - st[ 5] ^= bc[ 0]; - st[ 6] ^= bc[ 1]; - st[ 7] ^= bc[ 2]; - st[ 8] ^= bc[ 3]; - st[ 9] ^= bc[ 4]; - - bc[ 0] = ~st[11] & st[12]; - bc[ 1] = ~st[12] & st[13]; - bc[ 2] = ~st[13] & st[14]; - bc[ 3] = ~st[14] & st[10]; - bc[ 4] = ~st[10] & st[11]; - st[10] ^= bc[ 0]; - st[11] ^= bc[ 1]; - st[12] ^= bc[ 2]; - st[13] ^= bc[ 3]; - st[14] ^= bc[ 4]; - - bc[ 0] = ~st[16] & st[17]; - bc[ 1] = ~st[17] & st[18]; - bc[ 2] = ~st[18] & st[19]; - bc[ 3] = ~st[19] & st[15]; - bc[ 4] = ~st[15] & st[16]; - st[15] ^= bc[ 0]; - st[16] ^= bc[ 1]; - st[17] ^= bc[ 2]; - st[18] ^= bc[ 3]; - st[19] ^= bc[ 4]; - - bc[ 0] = ~st[21] & st[22]; - bc[ 1] = ~st[22] & st[23]; - bc[ 2] = ~st[23] & st[24]; - bc[ 3] = ~st[24] & st[20]; - bc[ 4] = ~st[20] & st[21]; - st[20] ^= bc[ 0]; - st[21] ^= bc[ 1]; - st[22] ^= bc[ 2]; - st[23] ^= bc[ 3]; - st[24] ^= bc[ 4]; -} - -static void keccakf(u64 st[25]) -{ - int round; - - 
for (round = 0; round < KECCAK_ROUNDS; round++) { - keccakf_round(st); - /* Iota */ - st[0] ^= keccakf_rndc[round]; - } -} - -int crypto_sha3_init(struct shash_desc *desc) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - - sctx->rsiz = 200 - 2 * digest_size; - sctx->rsizw = sctx->rsiz / 8; - sctx->partial = 0; - - memset(sctx->st, 0, sizeof(sctx->st)); - return 0; -} -EXPORT_SYMBOL(crypto_sha3_init); - -int crypto_sha3_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int done; - const u8 *src; - - done = 0; - src = data; - - if ((sctx->partial + len) > (sctx->rsiz - 1)) { - if (sctx->partial) { - done = -sctx->partial; - memcpy(sctx->buf + sctx->partial, data, - done + sctx->rsiz); - src = sctx->buf; - } - - do { - unsigned int i; - - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= get_unaligned_le64(src + 8 * i); - keccakf(sctx->st); - - done += sctx->rsiz; - src = data + done; - } while (done + (sctx->rsiz - 1) < len); - - sctx->partial = 0; - } - memcpy(sctx->buf + sctx->partial, src, len - done); - sctx->partial += (len - done); - - return 0; -} -EXPORT_SYMBOL(crypto_sha3_update); - -int crypto_sha3_final(struct shash_desc *desc, u8 *out) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int i, inlen = sctx->partial; - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - __le64 *digest = (__le64 *)out; - - sctx->buf[inlen++] = 0x06; - memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); - sctx->buf[sctx->rsiz - 1] |= 0x80; - - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); - - keccakf(sctx->st); - - for (i = 0; i < digest_size / 8; i++) - put_unaligned_le64(sctx->st[i], digest++); - - if (digest_size & 4) - put_unaligned_le32(sctx->st[i], (__le32 *)digest); - - memset(sctx, 0, sizeof(*sctx)); - return 0; -} -EXPORT_SYMBOL(crypto_sha3_final); - -static struct shash_alg algs[] = { { - .digestsize = SHA3_224_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), - .base.cra_name = "sha3-224", - .base.cra_driver_name = "sha3-224-generic", - .base.cra_blocksize = SHA3_224_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}, { - .digestsize = SHA3_256_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), - .base.cra_name = "sha3-256", - .base.cra_driver_name = "sha3-256-generic", - .base.cra_blocksize = SHA3_256_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}, { - .digestsize = SHA3_384_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), - .base.cra_name = "sha3-384", - .base.cra_driver_name = "sha3-384-generic", - .base.cra_blocksize = SHA3_384_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}, { - .digestsize = SHA3_512_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), - .base.cra_name = "sha3-512", - .base.cra_driver_name = "sha3-512-generic", - .base.cra_blocksize = SHA3_512_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -} }; - -static int __init sha3_generic_mod_init(void) -{ - return crypto_register_shashes(algs, ARRAY_SIZE(algs)); -} - -static void __exit sha3_generic_mod_fini(void) -{ - 
crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -subsys_initcall(sha3_generic_mod_init); -module_exit(sha3_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm"); - -MODULE_ALIAS_CRYPTO("sha3-224"); -MODULE_ALIAS_CRYPTO("sha3-224-generic"); -MODULE_ALIAS_CRYPTO("sha3-256"); -MODULE_ALIAS_CRYPTO("sha3-256-generic"); -MODULE_ALIAS_CRYPTO("sha3-384"); -MODULE_ALIAS_CRYPTO("sha3-384-generic"); -MODULE_ALIAS_CRYPTO("sha3-512"); -MODULE_ALIAS_CRYPTO("sha3-512-generic"); diff --git a/crypto/sha512.c b/crypto/sha512.c new file mode 100644 index 000000000000..d320fe53913f --- /dev/null +++ b/crypto/sha512.c @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API support for SHA-384, SHA-512, HMAC-SHA384, and HMAC-SHA512 + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2003 Kyle McMartin <kyle@debian.org> + * Copyright 2025 Google LLC + */ +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +/* + * Export and import functions. crypto_shash wants a particular format that + * matches that used by some legacy drivers. It currently is the same as the + * library SHA context, except the value in bytecount_lo must be block-aligned + * and the remainder must be stored in an extra u8 appended to the struct. + */ + +#define SHA512_SHASH_STATE_SIZE 209 +static_assert(offsetof(struct __sha512_ctx, state) == 0); +static_assert(offsetof(struct __sha512_ctx, bytecount_lo) == 64); +static_assert(offsetof(struct __sha512_ctx, bytecount_hi) == 72); +static_assert(offsetof(struct __sha512_ctx, buf) == 80); +static_assert(sizeof(struct __sha512_ctx) + 1 == SHA512_SHASH_STATE_SIZE); + +static int __crypto_sha512_export(const struct __sha512_ctx *ctx0, void *out) +{ + struct __sha512_ctx ctx = *ctx0; + unsigned int partial; + u8 *p = out; + + partial = ctx.bytecount_lo % SHA512_BLOCK_SIZE; + ctx.bytecount_lo -= partial; + memcpy(p, &ctx, sizeof(ctx)); + p += sizeof(ctx); + *p = partial; + return 0; +} + +static int __crypto_sha512_import(struct __sha512_ctx *ctx, const void *in) +{ + const u8 *p = in; + + memcpy(ctx, p, sizeof(*ctx)); + p += sizeof(*ctx); + ctx->bytecount_lo += *p; + return 0; +} + +static int __crypto_sha512_export_core(const struct __sha512_ctx *ctx, + void *out) +{ + memcpy(out, ctx, offsetof(struct __sha512_ctx, buf)); + return 0; +} + +static int __crypto_sha512_import_core(struct __sha512_ctx *ctx, const void *in) +{ + memcpy(ctx, in, offsetof(struct __sha512_ctx, buf)); + return 0; +} + +/* SHA-384 */ + +const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = { + 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, + 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a, + 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43, + 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda, + 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb, + 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b +}; +EXPORT_SYMBOL_GPL(sha384_zero_message_hash); + +#define SHA384_CTX(desc) ((struct sha384_ctx *)shash_desc_ctx(desc)) + +static int crypto_sha384_init(struct shash_desc *desc) +{ + sha384_init(SHA384_CTX(desc)); + return 0; +} + +static int crypto_sha384_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + sha384_update(SHA384_CTX(desc), data, len); + return 0; +} + +static int crypto_sha384_final(struct shash_desc *desc, u8 *out) +{ + sha384_final(SHA384_CTX(desc), out); + return 0; +} + +static 
int crypto_sha384_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha384(data, len, out); + return 0; +} + +static int crypto_sha384_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export(&SHA384_CTX(desc)->ctx, out); +} + +static int crypto_sha384_import(struct shash_desc *desc, const void *in) +{ + return __crypto_sha512_import(&SHA384_CTX(desc)->ctx, in); +} + +static int crypto_sha384_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export_core(&SHA384_CTX(desc)->ctx, out); +} + +static int crypto_sha384_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_sha512_import_core(&SHA384_CTX(desc)->ctx, in); +} + +/* SHA-512 */ + +const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = { + 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd, + 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07, + 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc, + 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce, + 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0, + 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f, + 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81, + 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e +}; +EXPORT_SYMBOL_GPL(sha512_zero_message_hash); + +#define SHA512_CTX(desc) ((struct sha512_ctx *)shash_desc_ctx(desc)) + +static int crypto_sha512_init(struct shash_desc *desc) +{ + sha512_init(SHA512_CTX(desc)); + return 0; +} + +static int crypto_sha512_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + sha512_update(SHA512_CTX(desc), data, len); + return 0; +} + +static int crypto_sha512_final(struct shash_desc *desc, u8 *out) +{ + sha512_final(SHA512_CTX(desc), out); + return 0; +} + +static int crypto_sha512_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + sha512(data, len, out); + return 0; +} + +static int crypto_sha512_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export(&SHA512_CTX(desc)->ctx, out); +} + +static int crypto_sha512_import(struct shash_desc *desc, const void *in) +{ + return __crypto_sha512_import(&SHA512_CTX(desc)->ctx, in); +} + +static int crypto_sha512_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export_core(&SHA512_CTX(desc)->ctx, out); +} + +static int crypto_sha512_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_sha512_import_core(&SHA512_CTX(desc)->ctx, in); +} + +/* HMAC-SHA384 */ + +#define HMAC_SHA384_KEY(tfm) ((struct hmac_sha384_key *)crypto_shash_ctx(tfm)) +#define HMAC_SHA384_CTX(desc) ((struct hmac_sha384_ctx *)shash_desc_ctx(desc)) + +static int crypto_hmac_sha384_setkey(struct crypto_shash *tfm, + const u8 *raw_key, unsigned int keylen) +{ + hmac_sha384_preparekey(HMAC_SHA384_KEY(tfm), raw_key, keylen); + return 0; +} + +static int crypto_hmac_sha384_init(struct shash_desc *desc) +{ + hmac_sha384_init(HMAC_SHA384_CTX(desc), HMAC_SHA384_KEY(desc->tfm)); + return 0; +} + +static int crypto_hmac_sha384_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + hmac_sha384_update(HMAC_SHA384_CTX(desc), data, len); + return 0; +} + +static int crypto_hmac_sha384_final(struct shash_desc *desc, u8 *out) +{ + hmac_sha384_final(HMAC_SHA384_CTX(desc), out); + return 0; +} + +static int crypto_hmac_sha384_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, + u8 *out) +{ + hmac_sha384(HMAC_SHA384_KEY(desc->tfm), data, len, out); + return 0; +} + +static int crypto_hmac_sha384_export(struct shash_desc *desc, void *out) 
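+/*
+ * Only the inner SHA-384 context is serialized here; the outer-hash state
+ * (ostate) is not exported, and import restores it from the tfm's prepared
+ * key (see crypto_hmac_sha384_import() below).
+ */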
+{ + return __crypto_sha512_export(&HMAC_SHA384_CTX(desc)->ctx.sha_ctx, out); +} + +static int crypto_hmac_sha384_import(struct shash_desc *desc, const void *in) +{ + struct hmac_sha384_ctx *ctx = HMAC_SHA384_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA384_KEY(desc->tfm)->key.ostate; + return __crypto_sha512_import(&ctx->ctx.sha_ctx, in); +} + +static int crypto_hmac_sha384_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export_core(&HMAC_SHA384_CTX(desc)->ctx.sha_ctx, + out); +} + +static int crypto_hmac_sha384_import_core(struct shash_desc *desc, + const void *in) +{ + struct hmac_sha384_ctx *ctx = HMAC_SHA384_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA384_KEY(desc->tfm)->key.ostate; + return __crypto_sha512_import_core(&ctx->ctx.sha_ctx, in); +} + +/* HMAC-SHA512 */ + +#define HMAC_SHA512_KEY(tfm) ((struct hmac_sha512_key *)crypto_shash_ctx(tfm)) +#define HMAC_SHA512_CTX(desc) ((struct hmac_sha512_ctx *)shash_desc_ctx(desc)) + +static int crypto_hmac_sha512_setkey(struct crypto_shash *tfm, + const u8 *raw_key, unsigned int keylen) +{ + hmac_sha512_preparekey(HMAC_SHA512_KEY(tfm), raw_key, keylen); + return 0; +} + +static int crypto_hmac_sha512_init(struct shash_desc *desc) +{ + hmac_sha512_init(HMAC_SHA512_CTX(desc), HMAC_SHA512_KEY(desc->tfm)); + return 0; +} + +static int crypto_hmac_sha512_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + hmac_sha512_update(HMAC_SHA512_CTX(desc), data, len); + return 0; +} + +static int crypto_hmac_sha512_final(struct shash_desc *desc, u8 *out) +{ + hmac_sha512_final(HMAC_SHA512_CTX(desc), out); + return 0; +} + +static int crypto_hmac_sha512_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, + u8 *out) +{ + hmac_sha512(HMAC_SHA512_KEY(desc->tfm), data, len, out); + return 0; +} + +static int crypto_hmac_sha512_export(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export(&HMAC_SHA512_CTX(desc)->ctx.sha_ctx, out); +} + +static int crypto_hmac_sha512_import(struct shash_desc *desc, const void *in) +{ + struct hmac_sha512_ctx *ctx = HMAC_SHA512_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA512_KEY(desc->tfm)->key.ostate; + return __crypto_sha512_import(&ctx->ctx.sha_ctx, in); +} + +static int crypto_hmac_sha512_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_sha512_export_core(&HMAC_SHA512_CTX(desc)->ctx.sha_ctx, + out); +} + +static int crypto_hmac_sha512_import_core(struct shash_desc *desc, + const void *in) +{ + struct hmac_sha512_ctx *ctx = HMAC_SHA512_CTX(desc); + + ctx->ctx.ostate = HMAC_SHA512_KEY(desc->tfm)->key.ostate; + return __crypto_sha512_import_core(&ctx->ctx.sha_ctx, in); +} + +/* Algorithm definitions */ + +static struct shash_alg algs[] = { + { + .base.cra_name = "sha384", + .base.cra_driver_name = "sha384-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA384_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA384_DIGEST_SIZE, + .init = crypto_sha384_init, + .update = crypto_sha384_update, + .final = crypto_sha384_final, + .digest = crypto_sha384_digest, + .export = crypto_sha384_export, + .import = crypto_sha384_import, + .export_core = crypto_sha384_export_core, + .import_core = crypto_sha384_import_core, + .descsize = sizeof(struct sha384_ctx), + .statesize = SHA512_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "sha512", + .base.cra_driver_name = "sha512-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA512_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA512_DIGEST_SIZE, + .init = 
crypto_sha512_init, + .update = crypto_sha512_update, + .final = crypto_sha512_final, + .digest = crypto_sha512_digest, + .export = crypto_sha512_export, + .import = crypto_sha512_import, + .export_core = crypto_sha512_export_core, + .import_core = crypto_sha512_import_core, + .descsize = sizeof(struct sha512_ctx), + .statesize = SHA512_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "hmac(sha384)", + .base.cra_driver_name = "hmac-sha384-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA384_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct hmac_sha384_key), + .base.cra_module = THIS_MODULE, + .digestsize = SHA384_DIGEST_SIZE, + .setkey = crypto_hmac_sha384_setkey, + .init = crypto_hmac_sha384_init, + .update = crypto_hmac_sha384_update, + .final = crypto_hmac_sha384_final, + .digest = crypto_hmac_sha384_digest, + .export = crypto_hmac_sha384_export, + .import = crypto_hmac_sha384_import, + .export_core = crypto_hmac_sha384_export_core, + .import_core = crypto_hmac_sha384_import_core, + .descsize = sizeof(struct hmac_sha384_ctx), + .statesize = SHA512_SHASH_STATE_SIZE, + }, + { + .base.cra_name = "hmac(sha512)", + .base.cra_driver_name = "hmac-sha512-lib", + .base.cra_priority = 300, + .base.cra_blocksize = SHA512_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct hmac_sha512_key), + .base.cra_module = THIS_MODULE, + .digestsize = SHA512_DIGEST_SIZE, + .setkey = crypto_hmac_sha512_setkey, + .init = crypto_hmac_sha512_init, + .update = crypto_hmac_sha512_update, + .final = crypto_hmac_sha512_final, + .digest = crypto_hmac_sha512_digest, + .export = crypto_hmac_sha512_export, + .import = crypto_hmac_sha512_import, + .export_core = crypto_hmac_sha512_export_core, + .import_core = crypto_hmac_sha512_import_core, + .descsize = sizeof(struct hmac_sha512_ctx), + .statesize = SHA512_SHASH_STATE_SIZE, + }, +}; + +static int __init crypto_sha512_mod_init(void) +{ + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} +module_init(crypto_sha512_mod_init); + +static void __exit crypto_sha512_mod_exit(void) +{ + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); +} +module_exit(crypto_sha512_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API support for SHA-384, SHA-512, HMAC-SHA384, and HMAC-SHA512"); + +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha384-lib"); +MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha512-lib"); +MODULE_ALIAS_CRYPTO("hmac(sha384)"); +MODULE_ALIAS_CRYPTO("hmac-sha384-lib"); +MODULE_ALIAS_CRYPTO("hmac(sha512)"); +MODULE_ALIAS_CRYPTO("hmac-sha512-lib"); diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c deleted file mode 100644 index c72d72ad828e..000000000000 --- a/crypto/sha512_generic.c +++ /dev/null @@ -1,230 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* SHA-512 code by Jean-Luc Cooke <jlcooke@certainkey.com> - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2003 Kyle McMartin <kyle@debian.org> - */ -#include <crypto/internal/hash.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/crypto.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha512_base.h> -#include <linux/percpu.h> -#include <asm/byteorder.h> -#include <asm/unaligned.h> - -const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = { - 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, - 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a, - 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 
0x07, 0x43, - 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda, - 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb, - 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b -}; -EXPORT_SYMBOL_GPL(sha384_zero_message_hash); - -const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = { - 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd, - 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07, - 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc, - 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce, - 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0, - 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f, - 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81, - 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e -}; -EXPORT_SYMBOL_GPL(sha512_zero_message_hash); - -static inline u64 Ch(u64 x, u64 y, u64 z) -{ - return z ^ (x & (y ^ z)); -} - -static inline u64 Maj(u64 x, u64 y, u64 z) -{ - return (x & y) | (z & (x | y)); -} - -static const u64 sha512_K[80] = { - 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, - 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, - 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, - 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, - 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, - 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, - 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL, - 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, - 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, - 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, - 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, - 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, - 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, - 0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, - 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, - 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, - 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, - 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, - 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, - 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, - 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, - 0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, - 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, - 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, - 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, - 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, - 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL, -}; - -#define e0(x) (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39)) -#define e1(x) (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41)) -#define s0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7)) -#define s1(x) (ror64(x,19) ^ ror64(x,61) ^ (x >> 6)) - -static inline void LOAD_OP(int I, u64 *W, const u8 *input) -{ - W[I] = get_unaligned_be64((__u64 *)input + I); -} - -static inline void BLEND_OP(int I, u64 *W) -{ - W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]); -} - -static void -sha512_transform(u64 *state, const u8 *input) -{ - u64 a, b, c, d, e, f, g, h, t1, t2; - - int i; - u64 W[16]; - - /* load the state into our registers */ - a=state[0]; b=state[1]; c=state[2]; d=state[3]; - e=state[4]; f=state[5]; g=state[6]; 
h=state[7]; - - /* now iterate */ - for (i=0; i<80; i+=8) { - if (!(i & 8)) { - int j; - - if (i < 16) { - /* load the input */ - for (j = 0; j < 16; j++) - LOAD_OP(i + j, W, input); - } else { - for (j = 0; j < 16; j++) { - BLEND_OP(i + j, W); - } - } - } - - t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; - } - - state[0] += a; state[1] += b; state[2] += c; state[3] += d; - state[4] += e; state[5] += f; state[6] += g; state[7] += h; - - /* erase our data */ - a = b = c = d = e = f = g = h = t1 = t2 = 0; -} - -static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src, - int blocks) -{ - while (blocks--) { - sha512_transform(sst->state, src); - src += SHA512_BLOCK_SIZE; - } -} - -int crypto_sha512_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return sha512_base_do_update(desc, data, len, sha512_generic_block_fn); -} -EXPORT_SYMBOL(crypto_sha512_update); - -static int sha512_final(struct shash_desc *desc, u8 *hash) -{ - sha512_base_do_finalize(desc, sha512_generic_block_fn); - return sha512_base_finish(desc, hash); -} - -int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash) -{ - sha512_base_do_update(desc, data, len, sha512_generic_block_fn); - return sha512_final(desc, hash); -} -EXPORT_SYMBOL(crypto_sha512_finup); - -static struct shash_alg sha512_algs[2] = { { - .digestsize = SHA512_DIGEST_SIZE, - .init = sha512_base_init, - .update = crypto_sha512_update, - .final = sha512_final, - .finup = crypto_sha512_finup, - .descsize = sizeof(struct sha512_state), - .base = { - .cra_name = "sha512", - .cra_driver_name = "sha512-generic", - .cra_priority = 100, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA384_DIGEST_SIZE, - .init = sha384_base_init, - .update = crypto_sha512_update, - .final = sha512_final, - .finup = crypto_sha512_finup, - .descsize = sizeof(struct sha512_state), - .base = { - .cra_name = "sha384", - .cra_driver_name = "sha384-generic", - .cra_priority = 100, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha512_generic_mod_init(void) -{ - return crypto_register_shashes(sha512_algs, ARRAY_SIZE(sha512_algs)); -} - -static void __exit sha512_generic_mod_fini(void) -{ - crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs)); -} - -subsys_initcall(sha512_generic_mod_init); -module_exit(sha512_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms"); - -MODULE_ALIAS_CRYPTO("sha384"); -MODULE_ALIAS_CRYPTO("sha384-generic"); -MODULE_ALIAS_CRYPTO("sha512"); -MODULE_ALIAS_CRYPTO("sha512-generic"); diff --git a/crypto/shash.c b/crypto/shash.c index 
0a0a50cb694f..4721f5f134f4 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -6,59 +6,40 @@ */ #include <crypto/scatterwalk.h> -#include <crypto/internal/hash.h> +#include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/seq_file.h> -#include <linux/cryptouser.h> +#include <linux/string.h> #include <net/netlink.h> -#include <linux/compiler.h> - -#include "internal.h" -static const struct crypto_type crypto_shash_type; +#include "hash.h" -static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +static inline bool crypto_shash_block_only(struct crypto_shash *tfm) { - return -ENOSYS; + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_BLOCK_ONLY; } -/* - * Check whether an shash algorithm has a setkey function. - * - * For CFI compatibility, this must not be an inline function. This is because - * when CFI is enabled, modules won't get the same address for shash_no_setkey - * (if it were exported, which inlining would require) as the core kernel will. - */ -bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm) { - return alg->setkey != shash_no_setkey; + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_FINAL_NONZERO; } -EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey); -static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +static inline bool crypto_shash_finup_max(struct crypto_shash *tfm) { - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); - unsigned long absize; - u8 *buffer, *alignbuffer; - int err; - - absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); - buffer = kmalloc(absize, GFP_ATOMIC); - if (!buffer) - return -ENOMEM; + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_FINUP_MAX; +} - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - err = shash->setkey(tfm, alignbuffer, keylen); - kfree_sensitive(buffer); - return err; +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + return -ENOSYS; } +EXPORT_SYMBOL_GPL(shash_no_setkey); static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) { @@ -70,14 +51,9 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; - if ((unsigned long)key & alignmask) - err = shash_setkey_unaligned(tfm, key, keylen); - else - err = shash->setkey(tfm, key, keylen); - + err = shash->setkey(tfm, key, keylen); if (unlikely(err)) { shash_set_needkey(tfm, shash); return err; @@ -88,116 +64,119 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_shash_setkey); -static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int __crypto_shash_init(struct shash_desc *desc) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); - unsigned int unaligned_len = alignmask + 1 - - ((unsigned long)data & alignmask); - /* - * We cannot count on __aligned() working for large values: - * https://patchwork.kernel.org/patch/9507697/ - */ - u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2]; - u8 *buf = 
PTR_ALIGN(&ubuf[0], alignmask + 1); - int err; - - if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf))) - return -EINVAL; - if (unaligned_len > len) - unaligned_len = len; + if (crypto_shash_block_only(tfm)) { + u8 *buf = shash_desc_ctx(desc); - memcpy(buf, data, unaligned_len); - err = shash->update(desc, buf, unaligned_len); - memset(buf, 0, unaligned_len); + buf += crypto_shash_descsize(tfm) - 1; + *buf = 0; + } - return err ?: - shash->update(desc, data + unaligned_len, len - unaligned_len); + return crypto_shash_alg(tfm)->init(desc); } -int crypto_shash_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +int crypto_shash_init(struct shash_desc *desc) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return __crypto_shash_init(desc); +} +EXPORT_SYMBOL_GPL(crypto_shash_init); - if ((unsigned long)data & alignmask) - return shash_update_unaligned(desc, data, len); +static int shash_default_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct shash_alg *shash = crypto_shash_alg(desc->tfm); - return shash->update(desc, data, len); + return shash->update(desc, data, len) ?: + shash->final(desc, out); } -EXPORT_SYMBOL_GPL(crypto_shash_update); -static int shash_final_unaligned(struct shash_desc *desc, u8 *out) +static int crypto_shash_op_and_zero( + int (*op)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out), + struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - struct crypto_shash *tfm = desc->tfm; - unsigned long alignmask = crypto_shash_alignmask(tfm); - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned int ds = crypto_shash_digestsize(tfm); - /* - * We cannot count on __aligned() working for large values: - * https://patchwork.kernel.org/patch/9507697/ - */ - u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE]; - u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; - if (WARN_ON(buf + ds > ubuf + sizeof(ubuf))) - return -EINVAL; - - err = shash->final(desc, buf); - if (err) - goto out; - - memcpy(out, buf, ds); - -out: - memset(buf, 0, ds); + err = op(desc, data, len, out); + memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm)); return err; } -int crypto_shash_final(struct shash_desc *desc, u8 *out) +int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data, + unsigned int len, u8 *restrict out) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + u8 *blenp = shash_desc_ctx(desc); + bool finup_max, nonzero; + unsigned int bs; + int err; + u8 *buf; - if ((unsigned long)out & alignmask) - return shash_final_unaligned(desc, out); + if (!crypto_shash_block_only(tfm)) { + if (out) + goto finup; + return crypto_shash_alg(tfm)->update(desc, data, len); + } - return shash->final(desc, out); -} -EXPORT_SYMBOL_GPL(crypto_shash_final); + finup_max = out && crypto_shash_finup_max(tfm); -static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return crypto_shash_update(desc, data, len) ?: - crypto_shash_final(desc, out); -} + /* Retain extra block for final nonzero algorithms. 
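These algorithms require the final ->update/->finup chunk to be at least one byte, so at least one byte (up to a full block) is always left behind in the partial buffer.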
*/ + nonzero = crypto_shash_final_nonzero(tfm); -int crypto_shash_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + /* + * The partial block buffer follows the algorithm desc context. + * The byte following that contains the length. + */ + blenp += crypto_shash_descsize(tfm) - 1; + bs = crypto_shash_blocksize(tfm); + buf = blenp - bs; + + if (likely(!*blenp && finup_max)) + goto finup; + + while ((*blenp + len) >= bs + nonzero) { + unsigned int nbytes = len - nonzero; + const u8 *src = data; + + if (*blenp) { + memcpy(buf + *blenp, data, bs - *blenp); + nbytes = bs; + src = buf; + } + + err = crypto_shash_alg(tfm)->update(desc, src, nbytes); + if (err < 0) + return err; + + data += nbytes - err - *blenp; + len -= nbytes - err - *blenp; + *blenp = 0; + } - if (((unsigned long)data | (unsigned long)out) & alignmask) - return shash_finup_unaligned(desc, data, len, out); + if (*blenp || !out) { + memcpy(buf + *blenp, data, len); + *blenp += len; + if (!out) + return 0; + data = buf; + len = *blenp; + } - return shash->finup(desc, data, len, out); +finup: + return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc, + data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_finup); -static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int shash_default_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - return crypto_shash_init(desc) ?: + return __crypto_shash_init(desc) ?: crypto_shash_finup(desc, data, len, out); } @@ -205,16 +184,12 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - if (((unsigned long)data | (unsigned long)out) & alignmask) - return shash_digest_unaligned(desc, data, len, out); - - return shash->digest(desc, data, len, out); + return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc, + data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_digest); @@ -222,196 +197,107 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, unsigned int len, u8 *out) { SHASH_DESC_ON_STACK(desc, tfm); - int err; desc->tfm = tfm; - - err = crypto_shash_digest(desc, data, len, out); - - shash_desc_zero(desc); - - return err; + return crypto_shash_digest(desc, data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); -static int shash_default_export(struct shash_desc *desc, void *out) +static int __crypto_shash_export(struct shash_desc *desc, void *out, + int (*export)(struct shash_desc *desc, + void *out)) { - memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); - return 0; -} - -static int shash_default_import(struct shash_desc *desc, const void *in) -{ - memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); - return 0; -} - -static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(tfm); + struct crypto_shash *tfm = desc->tfm; + u8 *buf = shash_desc_ctx(desc); + unsigned int plen, ss; + + plen = crypto_shash_blocksize(tfm) + 1; + ss = crypto_shash_statesize(tfm); + if (crypto_shash_block_only(tfm)) + ss -= plen; + if (!export) 
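/* No export callback: the raw descriptor context is the exported state. */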
{ + memcpy(out, buf, ss); + return 0; + } - return crypto_shash_setkey(*ctx, key, keylen); + return export(desc, out); } -static int shash_async_init(struct ahash_request *req) +int crypto_shash_export_core(struct shash_desc *desc, void *out) { - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_init(desc); + return __crypto_shash_export(desc, out, + crypto_shash_alg(desc->tfm)->export_core); } +EXPORT_SYMBOL_GPL(crypto_shash_export_core); -int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) +int crypto_shash_export(struct shash_desc *desc, void *out) { - struct crypto_hash_walk walk; - int nbytes; - - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; - nbytes = crypto_hash_walk_done(&walk, nbytes)) - nbytes = crypto_shash_update(desc, walk.data, nbytes); - - return nbytes; -} -EXPORT_SYMBOL_GPL(shash_ahash_update); + struct crypto_shash *tfm = desc->tfm; -static int shash_async_update(struct ahash_request *req) -{ - return shash_ahash_update(req, ahash_request_ctx(req)); -} + if (crypto_shash_block_only(tfm)) { + unsigned int plen = crypto_shash_blocksize(tfm) + 1; + unsigned int descsize = crypto_shash_descsize(tfm); + unsigned int ss = crypto_shash_statesize(tfm); + u8 *buf = shash_desc_ctx(desc); -static int shash_async_final(struct ahash_request *req) -{ - return crypto_shash_final(ahash_request_ctx(req), req->result); + memcpy(out + ss - plen, buf + descsize - plen, plen); + } + return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export); } +EXPORT_SYMBOL_GPL(crypto_shash_export); -int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) +static int __crypto_shash_import(struct shash_desc *desc, const void *in, + int (*import)(struct shash_desc *desc, + const void *in)) { - struct crypto_hash_walk walk; - int nbytes; + struct crypto_shash *tfm = desc->tfm; + unsigned int descsize, plen, ss; + u8 *buf = shash_desc_ctx(desc); - nbytes = crypto_hash_walk_first(req, &walk); - if (!nbytes) - return crypto_shash_final(desc, req->result); + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; - do { - nbytes = crypto_hash_walk_last(&walk) ? 
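crypto_shash_export() above appends the buffered partial block and its count byte to the tail of the exported state, so the core state stays at the front where ->export (or the default memcpy) expects it. A sketch of that copy under the same layout; export_partial() is a hypothetical helper:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Exported state for a block-only shash, as produced above:
 *
 *   [ core state: ss - plen bytes ][ partial block + count: plen bytes ]
 *   where plen = blocksize + 1.
 */
static void export_partial(u8 *out, unsigned int ss,
			   const u8 *ctx, unsigned int descsize,
			   unsigned int bs)
{
	unsigned int plen = bs + 1;

	/* the tail of the desc context becomes the tail of the state */
	memcpy(out + ss - plen, ctx + descsize - plen, plen);
}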
- crypto_shash_finup(desc, walk.data, nbytes, - req->result) : - crypto_shash_update(desc, walk.data, nbytes); - nbytes = crypto_hash_walk_done(&walk, nbytes); - } while (nbytes > 0); + ss = crypto_shash_statesize(tfm); + if (crypto_shash_block_only(tfm)) { + plen = crypto_shash_blocksize(tfm) + 1; + ss -= plen; + descsize = crypto_shash_descsize(tfm); + buf[descsize - 1] = 0; + } + if (!import) { + memcpy(buf, in, ss); + return 0; + } - return nbytes; + return import(desc, in); } -EXPORT_SYMBOL_GPL(shash_ahash_finup); -static int shash_async_finup(struct ahash_request *req) +int crypto_shash_import_core(struct shash_desc *desc, const void *in) { - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_finup(req, desc); + return __crypto_shash_import(desc, in, + crypto_shash_alg(desc->tfm)->import_core); } +EXPORT_SYMBOL_GPL(crypto_shash_import_core); -int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) +int crypto_shash_import(struct shash_desc *desc, const void *in) { - unsigned int nbytes = req->nbytes; - struct scatterlist *sg; - unsigned int offset; + struct crypto_shash *tfm = desc->tfm; int err; - if (nbytes && - (sg = req->src, offset = sg->offset, - nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { - void *data; - - data = kmap_atomic(sg_page(sg)); - err = crypto_shash_digest(desc, data + offset, nbytes, - req->result); - kunmap_atomic(data); - } else - err = crypto_shash_init(desc) ?: - shash_ahash_finup(req, desc); - - return err; -} -EXPORT_SYMBOL_GPL(shash_ahash_digest); - -static int shash_async_digest(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); + err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import); + if (crypto_shash_block_only(tfm)) { + unsigned int plen = crypto_shash_blocksize(tfm) + 1; + unsigned int descsize = crypto_shash_descsize(tfm); + unsigned int ss = crypto_shash_statesize(tfm); + u8 *buf = shash_desc_ctx(desc); - desc->tfm = *ctx; - - return shash_ahash_digest(req, desc); -} - -static int shash_async_export(struct ahash_request *req, void *out) -{ - return crypto_shash_export(ahash_request_ctx(req), out); -} - -static int shash_async_import(struct ahash_request *req, const void *in) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_import(desc, in); -} - -static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) -{ - struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - - crypto_free_shash(*ctx); -} - -int crypto_init_shash_ops_async(struct crypto_tfm *tfm) -{ - struct crypto_alg *calg = tfm->__crt_alg; - struct shash_alg *alg = __crypto_shash_alg(calg); - struct crypto_ahash *crt = __crypto_ahash_cast(tfm); - struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - struct crypto_shash *shash; - - if (!crypto_mod_get(calg)) - return -EAGAIN; - - shash = crypto_create_tfm(calg, &crypto_shash_type); - if (IS_ERR(shash)) { - crypto_mod_put(calg); - return PTR_ERR(shash); + memcpy(buf + descsize - plen, in + ss - plen, plen); + if (buf[descsize - 1] >= plen) + err = -EOVERFLOW; } - - *ctx = shash; - tfm->exit = crypto_exit_shash_ops_async; - - crt->init = shash_async_init; - crt->update = shash_async_update; - crt->final = shash_async_final; - crt->finup = 
shash_async_finup; - crt->digest = shash_async_digest; - if (crypto_shash_alg_has_setkey(alg)) - crt->setkey = shash_async_setkey; - - crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & - CRYPTO_TFM_NEED_KEY); - - crt->export = shash_async_export; - crt->import = shash_async_import; - - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); - - return 0; + return err; } +EXPORT_SYMBOL_GPL(crypto_shash_import); static void crypto_shash_exit_tfm(struct crypto_tfm *tfm) { @@ -425,9 +311,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); struct shash_alg *alg = crypto_shash_alg(hash); - int err; - - hash->descsize = alg->descsize; shash_set_needkey(hash, alg); @@ -437,18 +320,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) if (!alg->init_tfm) return 0; - err = alg->init_tfm(hash); - if (err) - return err; - - /* ->init_tfm() may have increased the descsize. */ - if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) { - if (alg->exit_tfm) - alg->exit_tfm(hash); - return -EINVAL; - } - - return 0; + return alg->init_tfm(hash); } static void crypto_shash_free_instance(struct crypto_instance *inst) @@ -458,8 +330,8 @@ static void crypto_shash_free_instance(struct crypto_instance *inst) shash->free(shash); } -#ifdef CONFIG_NET -static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) +static int __maybe_unused crypto_shash_report( + struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_hash rhash; struct shash_alg *salg = __crypto_shash_alg(alg); @@ -473,12 +345,6 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash); } -#else -static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; @@ -491,18 +357,21 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "digestsize : %u\n", salg->digestsize); } -static const struct crypto_type crypto_shash_type = { +const struct crypto_type crypto_shash_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_shash_init_tfm, .free = crypto_shash_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_shash_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_shash_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SHASH, .tfmsize = offsetof(struct crypto_shash, base), + .algsize = offsetof(struct shash_alg, base), }; int crypto_grab_shash(struct crypto_shash_spawn *spawn, @@ -521,34 +390,130 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_shash); -static int shash_prepare_alg(struct shash_alg *alg) +int crypto_has_shash(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_shash_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_shash); + +struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash) +{ + struct crypto_tfm *tfm = crypto_shash_tfm(hash); + struct shash_alg *alg = crypto_shash_alg(hash); + struct crypto_shash *nhash; + int err; + + if (!crypto_shash_alg_has_setkey(alg)) { + tfm = crypto_tfm_get(tfm); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + return hash; + } + + if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init)) + return ERR_PTR(-ENOSYS); + + nhash = 
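On import, the partial-block count copied back from external state is untrusted, so crypto_shash_import() above rejects anything that could index past the buffer. A sketch of that check; validate_partial_len() is illustrative:

#include <linux/errno.h>
#include <linux/types.h>

/* Reject a buffered count >= blocksize + 1, as crypto_shash_import()
 * does above. A count of exactly blocksize is legal (final-nonzero
 * algorithms retain a full block), but anything larger cannot arise
 * legitimately and would overflow the buffer on the next update. */
static int validate_partial_len(const u8 *ctx, unsigned int descsize,
				unsigned int bs)
{
	return ctx[descsize - 1] >= bs + 1 ? -EOVERFLOW : 0;
}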
crypto_clone_tfm(&crypto_shash_type, tfm); + if (IS_ERR(nhash)) + return nhash; + + if (alg->clone_tfm) { + err = alg->clone_tfm(nhash, hash); + if (err) { + crypto_free_shash(nhash); + return ERR_PTR(err); + } + } + + if (alg->exit_tfm) + crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm; + + return nhash; +} +EXPORT_SYMBOL_GPL(crypto_clone_shash); + +int hash_prepare_alg(struct hash_alg_common *alg) { struct crypto_alg *base = &alg->base; - if (alg->digestsize > HASH_MAX_DIGESTSIZE || - alg->descsize > HASH_MAX_DESCSIZE || - alg->statesize > HASH_MAX_STATESIZE) + if (alg->digestsize > HASH_MAX_DIGESTSIZE) + return -EINVAL; + + /* alignmask is not useful for hashes, so it is not supported. */ + if (base->cra_alignmask) return -EINVAL; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + + return 0; +} + +static int shash_default_export_core(struct shash_desc *desc, void *out) +{ + return -ENOSYS; +} + +static int shash_default_import_core(struct shash_desc *desc, const void *in) +{ + return -ENOSYS; +} + +static int shash_prepare_alg(struct shash_alg *alg) +{ + struct crypto_alg *base = &alg->halg.base; + int err; + if ((alg->export && !alg->import) || (alg->import && !alg->export)) return -EINVAL; + err = hash_prepare_alg(&alg->halg); + if (err) + return err; + base->cra_type = &crypto_shash_type; - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + base->cra_flags |= CRYPTO_ALG_REQ_VIRT; + /* + * Handle missing optional functions. For each one we can either + * install a default here, or we can leave the pointer as NULL and check + * the pointer for NULL in crypto_shash_*(), avoiding an indirect call + * when the default behavior is desired. For ->finup and ->digest we + * install defaults, since for optimal performance algorithms should + * implement these anyway. On the other hand, for ->import and + * ->export the common case and best performance comes from the simple + * memcpy of the shash_desc_ctx, so when those pointers are NULL we + * leave them NULL and provide the memcpy with no indirect call. 
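crypto_clone_shash() above gives callers a cheap duplicate of a transform: unkeyed algorithms just take another reference, while keyed ones go through ->clone_tfm() (or fail with -ENOSYS if the algorithm has per-tfm state but no clone hook). A hedged usage sketch:

#include <crypto/hash.h>
#include <linux/err.h>

/* Duplicate a (possibly keyed) shash so two contexts can hash
 * independently without repeating crypto_shash_setkey(). */
static struct crypto_shash *get_private_copy(struct crypto_shash *hash)
{
	struct crypto_shash *clone;

	clone = crypto_clone_shash(hash);
	if (IS_ERR(clone))
		return clone;	/* e.g. -ENOSYS without ->clone_tfm */

	/* ... use clone, then release it with crypto_free_shash() ... */
	return clone;
}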
+ */ if (!alg->finup) - alg->finup = shash_finup_unaligned; + alg->finup = shash_default_finup; if (!alg->digest) - alg->digest = shash_digest_unaligned; - if (!alg->export) { - alg->export = shash_default_export; - alg->import = shash_default_import; - alg->statesize = alg->descsize; - } + alg->digest = shash_default_digest; + if (!alg->export && !alg->halg.statesize) + alg->halg.statesize = alg->descsize; if (!alg->setkey) alg->setkey = shash_no_setkey; + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + alg->descsize += base->cra_blocksize + 1; + alg->statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = shash_default_export_core; + alg->import_core = shash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + + if (alg->descsize > HASH_MAX_DESCSIZE) + return -EINVAL; + if (alg->statesize > HASH_MAX_STATESIZE) + return -EINVAL; + + base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize; + return 0; } diff --git a/crypto/sig.c b/crypto/sig.c new file mode 100644 index 000000000000..beba745b6405 --- /dev/null +++ b/crypto/sig.c @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Signature Algorithm + * + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#include <crypto/internal/sig.h> +#include <linux/cryptouser.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/string.h> +#include <net/netlink.h> + +#include "internal.h" + +static void crypto_sig_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_sig *sig = __crypto_sig_tfm(tfm); + struct sig_alg *alg = crypto_sig_alg(sig); + + alg->exit(sig); +} + +static int crypto_sig_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_sig *sig = __crypto_sig_tfm(tfm); + struct sig_alg *alg = crypto_sig_alg(sig); + + if (alg->exit) + sig->base.exit = crypto_sig_exit_tfm; + + if (alg->init) + return alg->init(sig); + + return 0; +} + +static void crypto_sig_free_instance(struct crypto_instance *inst) +{ + struct sig_instance *sig = sig_instance(inst); + + sig->free(sig); +} + +static void __maybe_unused crypto_sig_show(struct seq_file *m, + struct crypto_alg *alg) +{ + seq_puts(m, "type : sig\n"); +} + +static int __maybe_unused crypto_sig_report(struct sk_buff *skb, + struct crypto_alg *alg) +{ + struct crypto_report_sig rsig = {}; + + strscpy(rsig.type, "sig", sizeof(rsig.type)); + + return nla_put(skb, CRYPTOCFGA_REPORT_SIG, sizeof(rsig), &rsig); +} + +static const struct crypto_type crypto_sig_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_sig_init_tfm, + .free = crypto_sig_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_sig_show, +#endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) + .report = crypto_sig_report, +#endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_SIG, + .tfmsize = offsetof(struct crypto_sig, base), + .algsize = offsetof(struct sig_alg, base), +}; + +struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_sig); + +static int sig_default_sign(struct crypto_sig *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + return -ENOSYS; +} + +static int sig_default_verify(struct crypto_sig *tfm, + const void 
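shash_prepare_alg() fills in the optional hooks, so a minimal algorithm only supplies the mandatory ones; for CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms it additionally grows descsize and statesize by blocksize + 1 to make room for the partial block and count byte. A registration sketch that leans on those defaults; every name, size, and stub below is illustrative, not an in-tree driver:

#include <crypto/internal/hash.h>
#include <linux/module.h>

struct example_state { u32 h[8]; u64 count; };	/* stand-in state */

static int example_init(struct shash_desc *desc) { return 0; }
static int example_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len) { return 0; }
static int example_final(struct shash_desc *desc, u8 *out) { return 0; }

static struct shash_alg example_alg = {
	.init		= example_init,
	.update		= example_update,
	.final		= example_final,
	/* .finup, .digest, .setkey, .export, .import left NULL:
	 * shash_prepare_alg() installs defaults at registration time,
	 * and NULL export/import means a plain memcpy of the desc
	 * context with statesize defaulting to descsize. */
	.digestsize	= 32,
	.descsize	= sizeof(struct example_state),
	.base		= {
		.cra_name	 = "example",
		.cra_driver_name = "example-generic",
		.cra_blocksize	 = 64,
		.cra_module	 = THIS_MODULE,
	},
};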
*src, unsigned int slen, + const void *dst, unsigned int dlen) +{ + return -ENOSYS; +} + +static int sig_default_set_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + return -ENOSYS; +} + +static unsigned int sig_default_size(struct crypto_sig *tfm) +{ + return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE); +} + +static int sig_prepare_alg(struct sig_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (!alg->sign) + alg->sign = sig_default_sign; + if (!alg->verify) + alg->verify = sig_default_verify; + if (!alg->set_priv_key) + alg->set_priv_key = sig_default_set_key; + if (!alg->set_pub_key) + return -EINVAL; + if (!alg->key_size) + return -EINVAL; + if (!alg->max_size) + alg->max_size = sig_default_size; + if (!alg->digest_size) + alg->digest_size = sig_default_size; + + base->cra_type = &crypto_sig_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SIG; + + return 0; +} + +int crypto_register_sig(struct sig_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = sig_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_sig); + +void crypto_unregister_sig(struct sig_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_sig); + +int sig_register_instance(struct crypto_template *tmpl, + struct sig_instance *inst) +{ + int err; + + if (WARN_ON(!inst->free)) + return -EINVAL; + + err = sig_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, sig_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(sig_register_instance); + +int crypto_grab_sig(struct crypto_sig_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_sig_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_sig); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Public Key Signature Algorithms"); diff --git a/crypto/simd.c b/crypto/simd.c index edaa479a1ec5..b07721d1f3f6 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) return 0; } -struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, + const char *algname, const char *drvname, const char *basename) { struct simd_skcipher_alg *salg; - struct crypto_skcipher *tfm; - struct skcipher_alg *ialg; struct skcipher_alg *alg; int err; - tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_skcipher_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_skcipher(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; + goto out; } EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); -struct simd_skcipher_alg *simd_skcipher_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return 
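sig_prepare_alg() below spells out the contract for the new sig type: ->set_pub_key and ->key_size are mandatory, everything else is defaulted (sign/verify to -ENOSYS stubs, max_size/digest_size to the byte-rounded key size). A minimal registration sketch for a verify-only algorithm; the names and stubs are illustrative:

#include <crypto/internal/sig.h>
#include <linux/module.h>

static int example_verify(struct crypto_sig *tfm,
			  const void *src, unsigned int slen,
			  const void *dst, unsigned int dlen)
{ return 0; /* stand-in */ }

static int example_set_pub_key(struct crypto_sig *tfm,
			       const void *key, unsigned int keylen)
{ return 0; /* stand-in */ }

static unsigned int example_key_size(struct crypto_sig *tfm)
{ return 256; /* key size in bits; stand-in */ }

static struct sig_alg example_sig = {
	.verify		= example_verify,
	.set_pub_key	= example_set_pub_key,	/* required */
	.key_size	= example_key_size,	/* required */
	/* .sign/.set_priv_key default to -ENOSYS; .max_size and
	 * .digest_size default to key_size rounded up to bytes */
	.base		= {
		.cra_name	 = "example-sig",
		.cra_driver_name = "example-sig-generic",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

/* registered via crypto_register_sig(&example_sig) */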
simd_skcipher_create_compat(algname, drvname, basename); -} -EXPORT_SYMBOL_GPL(simd_skcipher_create); - void simd_skcipher_free(struct simd_skcipher_alg *salg) { crypto_unregister_skcipher(&salg->alg); @@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; @@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm) return 0; } -struct simd_aead_alg *simd_aead_create_compat(const char *algname, - const char *drvname, - const char *basename) +static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg, + const char *algname, + const char *drvname, + const char *basename) { struct simd_aead_alg *salg; - struct crypto_aead *tfm; - struct aead_alg *ialg; struct aead_alg *alg; int err; - tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_aead_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_aead(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; -} -EXPORT_SYMBOL_GPL(simd_aead_create_compat); - -struct simd_aead_alg *simd_aead_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_aead_create_compat(algname, drvname, basename); + goto out; } -EXPORT_SYMBOL_GPL(simd_aead_create); -void simd_aead_free(struct simd_aead_alg *salg) +static void simd_aead_free(struct simd_aead_alg *salg) { crypto_unregister_aead(&salg->alg); kfree(salg); } -EXPORT_SYMBOL_GPL(simd_aead_free); int simd_register_aeads_compat(struct aead_alg *algs, int count, struct simd_aead_alg **simd_algs) @@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_aead_create_compat(algname, drvname, basename); + simd = simd_aead_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; @@ -523,4 +477,5 @@ void simd_unregister_aeads(struct aead_alg *algs, int count, } EXPORT_SYMBOL_GPL(simd_unregister_aeads); +MODULE_DESCRIPTION("Shared crypto SIMD helpers"); MODULE_LICENSE("GPL"); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index a15376245416..14a820cb06c7 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -15,147 +15,111 @@ #include <crypto/scatterwalk.h> #include <linux/bug.h> #include <linux/cryptouser.h> -#include <linux/compiler.h> -#include <linux/list.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> -#include <linux/rtnetlink.h> #include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> +#include "skcipher.h" 
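simd_skcipher_create_compat() no longer allocates a throwaway tfm just to look up the inner algorithm; callers that already hold the skcipher_alg pass it in directly, as simd_register_skciphers_compat() now does. A sketch of the updated call, mirroring the loop above (the "__"-prefixed naming convention comes from that loop):

#include <crypto/internal/simd.h>

/* Wrap one internal ("__"-prefixed) algorithm in a SIMD helper. */
static struct simd_skcipher_alg *wrap_internal(struct skcipher_alg *ialg)
{
	return simd_skcipher_create_compat(ialg,
					   ialg->base.cra_name + 2,	    /* "xts(aes)"     */
					   ialg->base.cra_driver_name + 2,  /* "xts-aes-foo"  */
					   ialg->base.cra_driver_name);	    /* "__xts-aes-foo" */
}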
-#include "internal.h" +#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e enum { - SKCIPHER_WALK_PHYS = 1 << 0, - SKCIPHER_WALK_SLOW = 1 << 1, - SKCIPHER_WALK_COPY = 1 << 2, - SKCIPHER_WALK_DIFF = 1 << 3, - SKCIPHER_WALK_SLEEP = 1 << 4, + SKCIPHER_WALK_SLOW = 1 << 0, + SKCIPHER_WALK_COPY = 1 << 1, + SKCIPHER_WALK_DIFF = 1 << 2, + SKCIPHER_WALK_SLEEP = 1 << 3, }; -struct skcipher_walk_buffer { - struct list_head entry; - struct scatter_walk dst; - unsigned int len; - u8 *data; - u8 buffer[]; -}; +static const struct crypto_type crypto_skcipher_type; static int skcipher_walk_next(struct skcipher_walk *walk); -static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) -{ - if (PageHighMem(scatterwalk_page(walk))) - kunmap_atomic(vaddr); -} - -static inline void *skcipher_map(struct scatter_walk *walk) -{ - struct page *page = scatterwalk_page(walk); - - return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) + - offset_in_page(walk->offset); -} - -static inline void skcipher_map_src(struct skcipher_walk *walk) -{ - walk->src.virt.addr = skcipher_map(&walk->in); -} - -static inline void skcipher_map_dst(struct skcipher_walk *walk) -{ - walk->dst.virt.addr = skcipher_map(&walk->out); -} - -static inline void skcipher_unmap_src(struct skcipher_walk *walk) -{ - skcipher_unmap(&walk->in, walk->src.virt.addr); -} - -static inline void skcipher_unmap_dst(struct skcipher_walk *walk) -{ - skcipher_unmap(&walk->out, walk->dst.virt.addr); -} - static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) { return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; } -/* Get a spot of the specified length that does not straddle a page. - * The caller needs to ensure that there is enough space for this operation. - */ -static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) +static inline struct skcipher_alg *__crypto_skcipher_alg( + struct crypto_alg *alg) { - u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); - - return max(start, end_page); + return container_of(alg, struct skcipher_alg, base); } -static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) -{ - u8 *addr; - - addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); - addr = skcipher_get_spot(addr, bsize); - scatterwalk_copychunks(addr, &walk->out, bsize, - (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); - return 0; -} - -int skcipher_walk_done(struct skcipher_walk *walk, int err) +/** + * skcipher_walk_done() - finish one step of a skcipher_walk + * @walk: the skcipher_walk + * @res: number of bytes *not* processed (>= 0) from walk->nbytes, + * or a -errno value to terminate the walk due to an error + * + * This function cleans up after one step of walking through the source and + * destination scatterlists, and advances to the next step if applicable. + * walk->nbytes is set to the number of bytes available in the next step, + * walk->total is set to the new total number of bytes remaining, and + * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there + * is no more data, or if an error occurred (i.e. -errno return), then + * walk->nbytes and walk->total are set to 0 and all resources owned by the + * skcipher_walk are freed. + * + * Return: 0 or a -errno value. If @res was a -errno value then it will be + * returned, but other errors may occur too. 
+ */ +int skcipher_walk_done(struct skcipher_walk *walk, int res) { - unsigned int n = walk->nbytes; - unsigned int nbytes = 0; + unsigned int n = walk->nbytes; /* num bytes processed this step */ + unsigned int total = 0; /* new total remaining */ if (!n) goto finish; - if (likely(err >= 0)) { - n -= err; - nbytes = walk->total - n; + if (likely(res >= 0)) { + n -= res; /* subtract num bytes *not* processed */ + total = walk->total - n; } - if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | - SKCIPHER_WALK_SLOW | + if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { -unmap_src: - skcipher_unmap_src(walk); + scatterwalk_advance(&walk->in, n); } else if (walk->flags & SKCIPHER_WALK_DIFF) { - skcipher_unmap_dst(walk); - goto unmap_src; + scatterwalk_done_src(&walk->in, n); } else if (walk->flags & SKCIPHER_WALK_COPY) { - skcipher_map_dst(walk); - memcpy(walk->dst.virt.addr, walk->page, n); - skcipher_unmap_dst(walk); - } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { - if (err > 0) { + scatterwalk_advance(&walk->in, n); + scatterwalk_map(&walk->out); + memcpy(walk->out.addr, walk->page, n); + } else { /* SKCIPHER_WALK_SLOW */ + if (res > 0) { /* * Didn't process all bytes. Either the algorithm is * broken, or this was the last step and it turned out * the message wasn't evenly divisible into blocks but * the algorithm requires it. */ - err = -EINVAL; - nbytes = 0; + res = -EINVAL; + total = 0; } else - n = skcipher_done_slow(walk, n); + memcpy_to_scatterwalk(&walk->out, walk->out.addr, n); + goto dst_done; } - if (err > 0) - err = 0; + scatterwalk_done_dst(&walk->out, n); +dst_done: - walk->total = nbytes; - walk->nbytes = 0; + if (res > 0) + res = 0; - scatterwalk_advance(&walk->in, n); - scatterwalk_advance(&walk->out, n); - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); + walk->total = total; + walk->nbytes = 0; - if (nbytes) { - crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? 
- CRYPTO_TFM_REQ_MAY_SLEEP : 0); + if (total) { + if (walk->flags & SKCIPHER_WALK_SLEEP) + cond_resched(); + walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF); return skcipher_walk_next(walk); } @@ -164,9 +128,6 @@ finish: if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; - if (walk->flags & SKCIPHER_WALK_PHYS) - goto out; - if (walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) @@ -175,107 +136,33 @@ finish: free_page((unsigned long)walk->page); out: - return err; + return res; } EXPORT_SYMBOL_GPL(skcipher_walk_done); -void skcipher_walk_complete(struct skcipher_walk *walk, int err) -{ - struct skcipher_walk_buffer *p, *tmp; - - list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { - u8 *data; - - if (err) - goto done; - - data = p->data; - if (!data) { - data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); - data = skcipher_get_spot(data, walk->stride); - } - - scatterwalk_copychunks(data, &p->dst, p->len, 1); - - if (offset_in_page(p->data) + p->len + walk->stride > - PAGE_SIZE) - free_page((unsigned long)p->data); - -done: - list_del(&p->entry); - kfree(p); - } - - if (!err && walk->iv != walk->oiv) - memcpy(walk->oiv, walk->iv, walk->ivsize); - if (walk->buffer != walk->page) - kfree(walk->buffer); - if (walk->page) - free_page((unsigned long)walk->page); -} -EXPORT_SYMBOL_GPL(skcipher_walk_complete); - -static void skcipher_queue_write(struct skcipher_walk *walk, - struct skcipher_walk_buffer *p) -{ - p->dst = walk->out; - list_add_tail(&p->entry, &walk->buffers); -} - static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { - bool phys = walk->flags & SKCIPHER_WALK_PHYS; unsigned alignmask = walk->alignmask; - struct skcipher_walk_buffer *p; - unsigned a; unsigned n; - u8 *buffer; - void *v; - - if (!phys) { - if (!walk->buffer) - walk->buffer = walk->page; - buffer = walk->buffer; - if (buffer) - goto ok; - } - - /* Start with the minimum alignment of kmalloc. */ - a = crypto_tfm_ctx_alignment() - 1; - n = bsize; + void *buffer; - if (phys) { - /* Calculate the minimum alignment of p->buffer. */ - a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; - n += sizeof(*p); - } - - /* Minimum size to align p->buffer by alignmask. */ - n += alignmask & ~a; - - /* Minimum size to ensure p->buffer does not straddle a page. 
*/ - n += (bsize - 1) & ~(alignmask | a); - - v = kzalloc(n, skcipher_walk_gfp(walk)); - if (!v) - return skcipher_walk_done(walk, -ENOMEM); - - if (phys) { - p = v; - p->len = bsize; - skcipher_queue_write(walk, p); - buffer = p->buffer; - } else { - walk->buffer = v; - buffer = v; + if (!walk->buffer) + walk->buffer = walk->page; + buffer = walk->buffer; + if (!buffer) { + /* Min size for a buffer of bsize bytes aligned to alignmask */ + n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + + buffer = kzalloc(n, skcipher_walk_gfp(walk)); + if (!buffer) + return skcipher_walk_done(walk, -ENOMEM); + walk->buffer = buffer; } -ok: - walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); - walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); - walk->src.virt.addr = walk->dst.virt.addr; - - scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); + buffer = PTR_ALIGN(buffer, alignmask + 1); + memcpy_from_scatterwalk(buffer, &walk->in, bsize); + walk->out.__addr = buffer; + walk->in.__addr = walk->out.addr; walk->nbytes = bsize; walk->flags |= SKCIPHER_WALK_SLOW; @@ -285,33 +172,18 @@ ok: static int skcipher_next_copy(struct skcipher_walk *walk) { - struct skcipher_walk_buffer *p; - u8 *tmp = walk->page; - - skcipher_map_src(walk); - memcpy(tmp, walk->src.virt.addr, walk->nbytes); - skcipher_unmap_src(walk); - - walk->src.virt.addr = tmp; - walk->dst.virt.addr = tmp; - - if (!(walk->flags & SKCIPHER_WALK_PHYS)) - return 0; - - p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); - if (!p) - return -ENOMEM; - - p->data = walk->page; - p->len = walk->nbytes; - skcipher_queue_write(walk, p); + void *tmp = walk->page; - if (offset_in_page(walk->page) + walk->nbytes + walk->stride > - PAGE_SIZE) - walk->page = NULL; - else - walk->page += walk->nbytes; + scatterwalk_map(&walk->in); + memcpy(tmp, walk->in.addr, walk->nbytes); + scatterwalk_unmap(&walk->in); + /* + * walk->in is advanced later when the number of bytes actually + * processed (which might be less than walk->nbytes) is known. 
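With the physical-address walk gone, the slow path above only ever needs one virtual bounce buffer: bsize bytes plus whatever slack it takes to reach the algorithm's alignment beyond what kmalloc() already guarantees. A sketch of that sizing, matching skcipher_next_slow():

#include <crypto/algapi.h>
#include <linux/slab.h>

/* Allocate a bounce buffer of bsize bytes aligned to alignmask + 1.
 * kmalloc() already aligns to crypto_tfm_ctx_alignment(), so only the
 * alignment bits above that need extra slack. The caller must keep
 * *raw for kfree(), as walk->buffer does above. */
static void *alloc_bounce(unsigned int bsize, unsigned int alignmask,
			  gfp_t gfp, void **raw)
{
	unsigned int n = bsize +
			 (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

	*raw = kzalloc(n, gfp);
	return *raw ? PTR_ALIGN(*raw, alignmask + 1) : NULL;
}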
+ */ + walk->in.__addr = tmp; + walk->out.__addr = tmp; return 0; } @@ -319,23 +191,17 @@ static int skcipher_next_fast(struct skcipher_walk *walk) { unsigned long diff; - walk->src.phys.page = scatterwalk_page(&walk->in); - walk->src.phys.offset = offset_in_page(walk->in.offset); - walk->dst.phys.page = scatterwalk_page(&walk->out); - walk->dst.phys.offset = offset_in_page(walk->out.offset); - - if (walk->flags & SKCIPHER_WALK_PHYS) - return 0; - - diff = walk->src.phys.offset - walk->dst.phys.offset; - diff |= walk->src.virt.page - walk->dst.virt.page; + diff = offset_in_page(walk->in.offset) - + offset_in_page(walk->out.offset); + diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) - + (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT)); - skcipher_map_src(walk); - walk->dst.virt.addr = walk->src.virt.addr; + scatterwalk_map(&walk->out); + walk->in.__addr = walk->out.__addr; if (diff) { walk->flags |= SKCIPHER_WALK_DIFF; - skcipher_map_dst(walk); + scatterwalk_map(&walk->in); } return 0; @@ -345,10 +211,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk) { unsigned int bsize; unsigned int n; - int err; - - walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF); n = walk->total; bsize = min(walk->stride, max(n, walk->blocksize)); @@ -360,9 +222,9 @@ static int skcipher_walk_next(struct skcipher_walk *walk) return skcipher_walk_done(walk, -EINVAL); slow_path: - err = skcipher_next_slow(walk, bsize); - goto set_phys_lowmem; + return skcipher_next_slow(walk, bsize); } + walk->nbytes = n; if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { if (!walk->page) { @@ -372,58 +234,30 @@ slow_path: if (!walk->page) goto slow_path; } - - walk->nbytes = min_t(unsigned, n, - PAGE_SIZE - offset_in_page(walk->page)); walk->flags |= SKCIPHER_WALK_COPY; - err = skcipher_next_copy(walk); - goto set_phys_lowmem; + return skcipher_next_copy(walk); } - walk->nbytes = n; - return skcipher_next_fast(walk); - -set_phys_lowmem: - if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { - walk->src.phys.page = virt_to_page(walk->src.virt.addr); - walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); - walk->src.phys.offset &= PAGE_SIZE - 1; - walk->dst.phys.offset &= PAGE_SIZE - 1; - } - return err; } static int skcipher_copy_iv(struct skcipher_walk *walk) { - unsigned a = crypto_tfm_ctx_alignment() - 1; unsigned alignmask = walk->alignmask; unsigned ivsize = walk->ivsize; - unsigned bs = walk->stride; - unsigned aligned_bs; + unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1); unsigned size; u8 *iv; - aligned_bs = ALIGN(bs, alignmask + 1); - - /* Minimum size to align buffer by alignmask. */ - size = alignmask & ~a; - - if (walk->flags & SKCIPHER_WALK_PHYS) - size += ivsize; - else { - size += aligned_bs + ivsize; - - /* Minimum size to ensure buffer does not straddle a page. 
*/ - size += (bs - 1) & ~(alignmask | a); - } + /* Min size for a buffer of stride + ivsize, aligned to alignmask */ + size = aligned_stride + ivsize + + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); if (!walk->buffer) return -ENOMEM; - iv = PTR_ALIGN(walk->buffer, alignmask + 1); - iv = skcipher_get_spot(iv, bs) + aligned_bs; + iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride; walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; @@ -431,7 +265,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk) static int skcipher_walk_first(struct skcipher_walk *walk) { - if (WARN_ON_ONCE(in_irq())) + if (WARN_ON_ONCE(in_hardirq())) return -EDEADLK; walk->buffer = NULL; @@ -446,15 +280,24 @@ static int skcipher_walk_first(struct skcipher_walk *walk) return skcipher_walk_next(walk); } -static int skcipher_walk_skcipher(struct skcipher_walk *walk, - struct skcipher_request *req) +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg; + + might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + + alg = crypto_skcipher_alg(tfm); walk->total = req->cryptlen; walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; + if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) + walk->flags = SKCIPHER_WALK_SLEEP; + else + walk->flags = 0; if (unlikely(!walk->total)) return 0; @@ -462,90 +305,50 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->flags &= ~SKCIPHER_WALK_SLEEP; - walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? - SKCIPHER_WALK_SLEEP : 0; - walk->blocksize = crypto_skcipher_blocksize(tfm); - walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); - return skcipher_walk_first(walk); -} - -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, bool atomic) -{ - int err; - - might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - - walk->flags &= ~SKCIPHER_WALK_PHYS; - - err = skcipher_walk_skcipher(walk, req); - - walk->flags &= atomic ? 
~SKCIPHER_WALK_SLEEP : ~0; + if (alg->co.base.cra_type != &crypto_skcipher_type) + walk->stride = alg->co.chunksize; + else + walk->stride = alg->walksize; - return err; + return skcipher_walk_first(walk); } EXPORT_SYMBOL_GPL(skcipher_walk_virt); -int skcipher_walk_async(struct skcipher_walk *walk, - struct skcipher_request *req) -{ - walk->flags |= SKCIPHER_WALK_PHYS; - - INIT_LIST_HEAD(&walk->buffers); - - return skcipher_walk_skcipher(walk, req); -} -EXPORT_SYMBOL_GPL(skcipher_walk_async); - -static int skcipher_walk_aead_common(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); - int err; walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; + if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) + walk->flags = SKCIPHER_WALK_SLEEP; + else + walk->flags = 0; if (unlikely(!walk->total)) return 0; - walk->flags &= ~SKCIPHER_WALK_PHYS; - - scatterwalk_start(&walk->in, req->src); - scatterwalk_start(&walk->out, req->dst); - - scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); - scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); - - scatterwalk_done(&walk->in, 0, walk->total); - scatterwalk_done(&walk->out, 0, walk->total); - - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) - walk->flags |= SKCIPHER_WALK_SLEEP; - else - walk->flags &= ~SKCIPHER_WALK_SLEEP; + scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen); + scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen); walk->blocksize = crypto_aead_blocksize(tfm); walk->stride = crypto_aead_chunksize(tfm); walk->ivsize = crypto_aead_ivsize(tfm); walk->alignmask = crypto_aead_alignmask(tfm); - err = skcipher_walk_first(walk); - - if (atomic) - walk->flags &= ~SKCIPHER_WALK_SLEEP; - - return err; + return skcipher_walk_first(walk); } -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { walk->total = req->cryptlen; @@ -553,8 +356,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, } EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); @@ -598,6 +402,17 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned long alignmask = crypto_skcipher_alignmask(tfm); int err; + if (cipher->co.base.cra_type != &crypto_skcipher_type) { + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); + + crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(*ctx, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_lskcipher_setkey(*ctx, key, keylen); + goto out; + } + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) return -EINVAL; @@ -606,6 +421,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); +out: if (unlikely(err)) { skcipher_set_needkey(tfm); return err; @@ -619,37 +435,87 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey); int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher 
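An skcipher tfm can now front an lskcipher, and the giveaway is cra_type: several paths above and below test it before dispatching to the lskcipher helpers (setkey, encrypt/decrypt via the _sg wrappers, export/import). A condensed sketch of that pattern; the function name is illustrative, and crypto_skcipher_type is file-local, so this check only makes sense inside skcipher.c as it does above:

#include <crypto/internal/skcipher.h>

/* True if this skcipher is backed by an lskcipher; its tfm context
 * then holds only a pointer to the inner lskcipher tfm. */
static bool backed_by_lskcipher(struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg(tfm)->co.base.cra_type !=
	       &crypto_skcipher_type;
}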
*tfm = crypto_skcipher_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - crypto_stats_get(alg); if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = crypto_skcipher_alg(tfm)->encrypt(req); - crypto_stats_skcipher_encrypt(cryptlen, ret, alg); - return ret; + return -ENOKEY; + if (alg->co.base.cra_type != &crypto_skcipher_type) + return crypto_lskcipher_encrypt_sg(req); + return alg->encrypt(req); } EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt); int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - crypto_stats_get(alg); if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = crypto_skcipher_alg(tfm)->decrypt(req); - crypto_stats_skcipher_decrypt(cryptlen, ret, alg); - return ret; + return -ENOKEY; + if (alg->co.base.cra_type != &crypto_skcipher_type) + return crypto_lskcipher_decrypt_sg(req); + return alg->decrypt(req); } EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt); +static int crypto_lskcipher_export(struct skcipher_request *req, void *out) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + u8 *ivs = skcipher_request_ctx(req); + + ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1); + + memcpy(out, ivs + crypto_skcipher_ivsize(tfm), + crypto_skcipher_statesize(tfm)); + + return 0; +} + +static int crypto_lskcipher_import(struct skcipher_request *req, const void *in) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + u8 *ivs = skcipher_request_ctx(req); + + ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1); + + memcpy(ivs + crypto_skcipher_ivsize(tfm), in, + crypto_skcipher_statesize(tfm)); + + return 0; +} + +static int skcipher_noexport(struct skcipher_request *req, void *out) +{ + return 0; +} + +static int skcipher_noimport(struct skcipher_request *req, const void *in) +{ + return 0; +} + +int crypto_skcipher_export(struct skcipher_request *req, void *out) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + + if (alg->co.base.cra_type != &crypto_skcipher_type) + return crypto_lskcipher_export(req, out); + return alg->export(req, out); +} +EXPORT_SYMBOL_GPL(crypto_skcipher_export); + +int crypto_skcipher_import(struct skcipher_request *req, const void *in) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + + if (alg->co.base.cra_type != &crypto_skcipher_type) + return crypto_lskcipher_import(req, in); + return alg->import(req, in); +} +EXPORT_SYMBOL_GPL(crypto_skcipher_import); + static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) { struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); @@ -665,6 +531,20 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher_set_needkey(skcipher); + if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) { + unsigned am = crypto_skcipher_alignmask(skcipher); + unsigned reqsize; + + reqsize = am & ~(crypto_tfm_ctx_alignment() - 1); + reqsize += crypto_skcipher_ivsize(skcipher); + reqsize += crypto_skcipher_statesize(skcipher); + crypto_skcipher_set_reqsize(skcipher, reqsize); + + return crypto_init_lskcipher_ops_sg(tfm); + } + + 
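For an lskcipher-backed tfm the request context doubles as the chaining state: an alignmask-aligned IV followed by the algorithm's state, which is exactly what crypto_lskcipher_export()/import() above copy in and out. A layout sketch; state_of() is illustrative:

#include <crypto/internal/skcipher.h>

/*
 * Request context layout for an lskcipher-backed skcipher:
 *
 *   [ padding to alignmask + 1 ][ IV: ivsize ][ state: statesize ]
 */
static u8 *state_of(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
	return ivs + crypto_skcipher_ivsize(tfm);
}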
crypto_skcipher_set_reqsize(skcipher, crypto_tfm_alg_reqsize(tfm)); + if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; @@ -674,6 +554,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) +{ + if (alg->cra_type != &crypto_skcipher_type) + return sizeof(struct crypto_lskcipher *); + + return crypto_alg_extsize(alg); +} + static void crypto_skcipher_free_instance(struct crypto_instance *inst) { struct skcipher_instance *skcipher = @@ -686,26 +574,25 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) { - struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, - base); + struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg); seq_printf(m, "type : skcipher\n"); seq_printf(m, "async : %s\n", - alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); seq_printf(m, "ivsize : %u\n", skcipher->ivsize); seq_printf(m, "chunksize : %u\n", skcipher->chunksize); seq_printf(m, "walksize : %u\n", skcipher->walksize); + seq_printf(m, "statesize : %u\n", skcipher->statesize); } -#ifdef CONFIG_NET -static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +static int __maybe_unused crypto_skcipher_report( + struct sk_buff *skb, struct crypto_alg *alg) { + struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg); struct crypto_report_blkcipher rblkcipher; - struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, - base); memset(&rblkcipher, 0, sizeof(rblkcipher)); @@ -720,25 +607,22 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, sizeof(rblkcipher), &rblkcipher); } -#else -static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif static const struct crypto_type crypto_skcipher_type = { - .extsize = crypto_alg_extsize, + .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_skcipher_show, #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_skcipher_report, +#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), + .algsize = offsetof(struct skcipher_alg, base), }; int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, @@ -763,7 +647,8 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher( struct crypto_skcipher *tfm; /* Only sync algorithms allowed. 
*/ - mask |= CRYPTO_ALG_ASYNC; + mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE; + type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE); tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask); @@ -787,21 +672,45 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_skcipher); -static int skcipher_prepare_alg(struct skcipher_alg *alg) +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) { struct crypto_alg *base = &alg->base; if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || - alg->walksize > PAGE_SIZE / 8) + alg->statesize > PAGE_SIZE / 2 || + (alg->ivsize + alg->statesize) > PAGE_SIZE / 2) return -EINVAL; if (!alg->chunksize) alg->chunksize = base->cra_blocksize; + + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + + return 0; +} + +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->walksize > PAGE_SIZE / 8) + return -EINVAL; + if (!alg->walksize) alg->walksize = alg->chunksize; + if (!alg->statesize) { + alg->import = skcipher_noimport; + alg->export = skcipher_noexport; + } else if (!(alg->import && alg->export)) + return -EINVAL; + base->cra_type = &crypto_skcipher_type; - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; return 0; @@ -981,4 +890,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/skcipher.h b/crypto/skcipher.h new file mode 100644 index 000000000000..703651367dd8 --- /dev/null +++ b/crypto/skcipher.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _LOCAL_CRYPTO_SKCIPHER_H +#define _LOCAL_CRYPTO_SKCIPHER_H + +#include <crypto/internal/skcipher.h> +#include "internal.h" + +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); +int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm); +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg); + +#endif /* _LOCAL_CRYPTO_SKCIPHER_H */ diff --git a/crypto/sm2.c b/crypto/sm2.c deleted file mode 100644 index db8a4a265669..000000000000 --- a/crypto/sm2.c +++ /dev/null @@ -1,460 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * SM2 asymmetric public-key algorithm - * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and - * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 - * - * Copyright (c) 2020, Alibaba Group. - * Authors: Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <linux/mpi.h> -#include <crypto/internal/akcipher.h> -#include <crypto/akcipher.h> -#include <crypto/hash.h> -#include <crypto/sm3_base.h> -#include <crypto/rng.h> -#include <crypto/sm2.h> -#include "sm2signature.asn1.h" - -#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8) - -struct ecc_domain_parms { - const char *desc; /* Description of the curve. */ - unsigned int nbits; /* Number of bits. */ - unsigned int fips:1; /* True if this is a FIPS140-2 approved curve */ - - /* The model describing this curve. This is mainly used to select - * the group equation. 
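skcipher_prepare_alg() above treats statesize as the switch for export/import support: zero means stateless, so the no-op skcipher_noexport()/skcipher_noimport() hooks are installed, while a nonzero statesize requires both callbacks. A sketch of a stateless registration that relies on that defaulting; all names, sizes, and stubs are illustrative:

#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen) { return 0; }
static int example_encrypt(struct skcipher_request *req) { return 0; }
static int example_decrypt(struct skcipher_request *req) { return 0; }

static struct skcipher_alg example_stateless = {
	.setkey		= example_setkey,
	.encrypt	= example_encrypt,
	.decrypt	= example_decrypt,
	.min_keysize	= 16,
	.max_keysize	= 32,
	.ivsize		= 16,
	/* .statesize = 0 and .export/.import left NULL: the prepare
	 * step wires in the no-op hooks, so crypto_skcipher_export()
	 * succeeds while copying nothing */
	.base		= {
		.cra_name	 = "example",
		.cra_driver_name = "example-generic",
		.cra_blocksize	 = 16,
		.cra_module	 = THIS_MODULE,
	},
};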
- */ - enum gcry_mpi_ec_models model; - - /* The actual ECC dialect used. This is used for curve specific - * optimizations and to select encodings etc. - */ - enum ecc_dialects dialect; - - const char *p; /* The prime defining the field. */ - const char *a, *b; /* The coefficients. For Twisted Edwards - * Curves b is used for d. For Montgomery - * Curves (a,b) has ((A-2)/4,B^-1). - */ - const char *n; /* The order of the base point. */ - const char *g_x, *g_y; /* Base point. */ - unsigned int h; /* Cofactor. */ -}; - -static const struct ecc_domain_parms sm2_ecp = { - .desc = "sm2p256v1", - .nbits = 256, - .fips = 0, - .model = MPI_EC_WEIERSTRASS, - .dialect = ECC_DIALECT_STANDARD, - .p = "0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff", - .a = "0xfffffffeffffffffffffffffffffffffffffffff00000000fffffffffffffffc", - .b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93", - .n = "0xfffffffeffffffffffffffffffffffff7203df6b21c6052b53bbf40939d54123", - .g_x = "0x32c4ae2c1f1981195f9904466a39c9948fe30bbff2660be1715a4589334c74c7", - .g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0", - .h = 1 -}; - -static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec) -{ - const struct ecc_domain_parms *ecp = &sm2_ecp; - MPI p, a, b; - MPI x, y; - int rc = -EINVAL; - - p = mpi_scanval(ecp->p); - a = mpi_scanval(ecp->a); - b = mpi_scanval(ecp->b); - if (!p || !a || !b) - goto free_p; - - x = mpi_scanval(ecp->g_x); - y = mpi_scanval(ecp->g_y); - if (!x || !y) - goto free; - - rc = -ENOMEM; - - ec->Q = mpi_point_new(0); - if (!ec->Q) - goto free; - - /* mpi_ec_setup_elliptic_curve */ - ec->G = mpi_point_new(0); - if (!ec->G) { - mpi_point_release(ec->Q); - goto free; - } - - mpi_set(ec->G->x, x); - mpi_set(ec->G->y, y); - mpi_set_ui(ec->G->z, 1); - - rc = -EINVAL; - ec->n = mpi_scanval(ecp->n); - if (!ec->n) { - mpi_point_release(ec->Q); - mpi_point_release(ec->G); - goto free; - } - - ec->h = ecp->h; - ec->name = ecp->desc; - mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b); - - rc = 0; - -free: - mpi_free(x); - mpi_free(y); -free_p: - mpi_free(p); - mpi_free(a); - mpi_free(b); - - return rc; -} - -static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec) -{ - mpi_ec_deinit(ec); - - memset(ec, 0, sizeof(*ec)); -} - -/* RESULT must have been initialized and is set on success to the - * point given by VALUE. 
- */ -static int sm2_ecc_os2ec(MPI_POINT result, MPI value) -{ - int rc; - size_t n; - unsigned char *buf; - MPI x, y; - - n = MPI_NBYTES(value); - buf = kmalloc(n, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value); - if (rc) - goto err_freebuf; - - rc = -EINVAL; - if (n < 1 || ((n - 1) % 2)) - goto err_freebuf; - /* No support for point compression */ - if (*buf != 0x4) - goto err_freebuf; - - rc = -ENOMEM; - n = (n - 1) / 2; - x = mpi_read_raw_data(buf + 1, n); - if (!x) - goto err_freebuf; - y = mpi_read_raw_data(buf + 1 + n, n); - if (!y) - goto err_freex; - - mpi_normalize(x); - mpi_normalize(y); - mpi_set(result->x, x); - mpi_set(result->y, y); - mpi_set_ui(result->z, 1); - - rc = 0; - - mpi_free(y); -err_freex: - mpi_free(x); -err_freebuf: - kfree(buf); - return rc; -} - -struct sm2_signature_ctx { - MPI sig_r; - MPI sig_s; -}; - -int sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct sm2_signature_ctx *sig = context; - - if (!value || !vlen) - return -EINVAL; - - sig->sig_r = mpi_read_raw_data(value, vlen); - if (!sig->sig_r) - return -ENOMEM; - - return 0; -} - -int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct sm2_signature_ctx *sig = context; - - if (!value || !vlen) - return -EINVAL; - - sig->sig_s = mpi_read_raw_data(value, vlen); - if (!sig->sig_s) - return -ENOMEM; - - return 0; -} - -static int sm2_z_digest_update(struct shash_desc *desc, - MPI m, unsigned int pbytes) -{ - static const unsigned char zero[32]; - unsigned char *in; - unsigned int inlen; - - in = mpi_get_buffer(m, &inlen, NULL); - if (!in) - return -EINVAL; - - if (inlen < pbytes) { - /* padding with zero */ - crypto_sm3_update(desc, zero, pbytes - inlen); - crypto_sm3_update(desc, in, inlen); - } else if (inlen > pbytes) { - /* skip the starting zero */ - crypto_sm3_update(desc, in + inlen - pbytes, pbytes); - } else { - crypto_sm3_update(desc, in, inlen); - } - - kfree(in); - return 0; -} - -static int sm2_z_digest_update_point(struct shash_desc *desc, - MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes) -{ - MPI x, y; - int ret = -EINVAL; - - x = mpi_new(0); - y = mpi_new(0); - - if (!mpi_ec_get_affine(x, y, point, ec) && - !sm2_z_digest_update(desc, x, pbytes) && - !sm2_z_digest_update(desc, y, pbytes)) - ret = 0; - - mpi_free(x); - mpi_free(y); - return ret; -} - -int sm2_compute_z_digest(struct crypto_akcipher *tfm, - const unsigned char *id, size_t id_len, - unsigned char dgst[SM3_DIGEST_SIZE]) -{ - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - uint16_t bits_len; - unsigned char entl[2]; - SHASH_DESC_ON_STACK(desc, NULL); - unsigned int pbytes; - - if (id_len > (USHRT_MAX / 8) || !ec->Q) - return -EINVAL; - - bits_len = (uint16_t)(id_len * 8); - entl[0] = bits_len >> 8; - entl[1] = bits_len & 0xff; - - pbytes = MPI_NBYTES(ec->p); - - /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */ - sm3_base_init(desc); - crypto_sm3_update(desc, entl, 2); - crypto_sm3_update(desc, id, id_len); - - if (sm2_z_digest_update(desc, ec->a, pbytes) || - sm2_z_digest_update(desc, ec->b, pbytes) || - sm2_z_digest_update_point(desc, ec->G, ec, pbytes) || - sm2_z_digest_update_point(desc, ec->Q, ec, pbytes)) - return -EINVAL; - - crypto_sm3_final(desc, dgst); - return 0; -} -EXPORT_SYMBOL(sm2_compute_z_digest); - -static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s) -{ - int rc = -EINVAL; - struct 
gcry_mpi_point sG, tP; - MPI t = NULL; - MPI x1 = NULL, y1 = NULL; - - mpi_point_init(&sG); - mpi_point_init(&tP); - x1 = mpi_new(0); - y1 = mpi_new(0); - t = mpi_new(0); - - /* r, s in [1, n-1] */ - if (mpi_cmp_ui(sig_r, 1) < 0 || mpi_cmp(sig_r, ec->n) > 0 || - mpi_cmp_ui(sig_s, 1) < 0 || mpi_cmp(sig_s, ec->n) > 0) { - goto leave; - } - - /* t = (r + s) % n, t == 0 */ - mpi_addm(t, sig_r, sig_s, ec->n); - if (mpi_cmp_ui(t, 0) == 0) - goto leave; - - /* sG + tP = (x1, y1) */ - rc = -EBADMSG; - mpi_ec_mul_point(&sG, sig_s, ec->G, ec); - mpi_ec_mul_point(&tP, t, ec->Q, ec); - mpi_ec_add_points(&sG, &sG, &tP, ec); - if (mpi_ec_get_affine(x1, y1, &sG, ec)) - goto leave; - - /* R = (e + x1) % n */ - mpi_addm(t, hash, x1, ec->n); - - /* check R == r */ - rc = -EKEYREJECTED; - if (mpi_cmp(t, sig_r)) - goto leave; - - rc = 0; - -leave: - mpi_point_free_parts(&sG); - mpi_point_free_parts(&tP); - mpi_free(x1); - mpi_free(y1); - mpi_free(t); - - return rc; -} - -static int sm2_verify(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - unsigned char *buffer; - struct sm2_signature_ctx sig; - MPI hash; - int ret; - - if (unlikely(!ec->Q)) - return -EINVAL; - - buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, req->src_len + req->dst_len), - buffer, req->src_len + req->dst_len, 0); - - sig.sig_r = NULL; - sig.sig_s = NULL; - ret = asn1_ber_decoder(&sm2signature_decoder, &sig, - buffer, req->src_len); - if (ret) - goto error; - - ret = -ENOMEM; - hash = mpi_read_raw_data(buffer + req->src_len, req->dst_len); - if (!hash) - goto error; - - ret = _sm2_verify(ec, hash, sig.sig_r, sig.sig_s); - - mpi_free(hash); -error: - mpi_free(sig.sig_r); - mpi_free(sig.sig_s); - kfree(buffer); - return ret; -} - -static int sm2_set_pub_key(struct crypto_akcipher *tfm, - const void *key, unsigned int keylen) -{ - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - MPI a; - int rc; - - /* include the uncompressed flag '0x04' */ - a = mpi_read_raw_data(key, keylen); - if (!a) - return -ENOMEM; - - mpi_normalize(a); - rc = sm2_ecc_os2ec(ec->Q, a); - mpi_free(a); - - return rc; -} - -static unsigned int sm2_max_size(struct crypto_akcipher *tfm) -{ - /* Unlimited max size */ - return PAGE_SIZE; -} - -static int sm2_init_tfm(struct crypto_akcipher *tfm) -{ - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - - return sm2_ec_ctx_init(ec); -} - -static void sm2_exit_tfm(struct crypto_akcipher *tfm) -{ - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - - sm2_ec_ctx_deinit(ec); -} - -static struct akcipher_alg sm2 = { - .verify = sm2_verify, - .set_pub_key = sm2_set_pub_key, - .max_size = sm2_max_size, - .init = sm2_init_tfm, - .exit = sm2_exit_tfm, - .base = { - .cra_name = "sm2", - .cra_driver_name = "sm2-generic", - .cra_priority = 100, - .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct mpi_ec_ctx), - }, -}; - -static int sm2_init(void) -{ - return crypto_register_akcipher(&sm2); -} - -static void sm2_exit(void) -{ - crypto_unregister_akcipher(&sm2); -} - -subsys_initcall(sm2_init); -module_exit(sm2_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>"); -MODULE_DESCRIPTION("SM2 generic algorithm"); -MODULE_ALIAS_CRYPTO("sm2-generic"); diff --git a/crypto/sm2signature.asn1 b/crypto/sm2signature.asn1 deleted file mode 100644 index ab8c0b754d21..000000000000 --- a/crypto/sm2signature.asn1 
+++ /dev/null @@ -1,4 +0,0 @@ -Sm2Signature ::= SEQUENCE { - sig_r INTEGER ({ sm2_get_signature_r }), - sig_s INTEGER ({ sm2_get_signature_s }) -} diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 193c4584bd00..7529139fcc96 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -5,18 +5,14 @@ * * Copyright (C) 2017 ARM Limited or its affiliates. * Written by Gilad Ben-Yossef <gilad@benyossef.com> + * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sm3.h> #include <crypto/sm3_base.h> -#include <linux/bitops.h> -#include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, @@ -26,154 +22,31 @@ const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { }; EXPORT_SYMBOL_GPL(sm3_zero_message_hash); -static inline u32 p0(u32 x) -{ - return x ^ rol32(x, 9) ^ rol32(x, 17); -} - -static inline u32 p1(u32 x) -{ - return x ^ rol32(x, 15) ^ rol32(x, 23); -} - -static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) -{ - return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); -} - -static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g) -{ - return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g)); -} - -static inline u32 t(unsigned int n) -{ - return (n < 16) ? SM3_T1 : SM3_T2; -} - -static void sm3_expand(u32 *t, u32 *w, u32 *wt) -{ - int i; - unsigned int tmp; - - /* load the input */ - for (i = 0; i <= 15; i++) - w[i] = get_unaligned_be32((__u32 *)t + i); - - for (i = 16; i <= 67; i++) { - tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15); - w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6]; - } - - for (i = 0; i <= 63; i++) - wt[i] = w[i] ^ w[i + 4]; -} - -static void sm3_compress(u32 *w, u32 *wt, u32 *m) -{ - u32 ss1; - u32 ss2; - u32 tt1; - u32 tt2; - u32 a, b, c, d, e, f, g, h; - int i; - - a = m[0]; - b = m[1]; - c = m[2]; - d = m[3]; - e = m[4]; - f = m[5]; - g = m[6]; - h = m[7]; - - for (i = 0; i <= 63; i++) { - - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); - - ss2 = ss1 ^ rol32(a, 12); - - tt1 = ff(i, a, b, c) + d + ss2 + *wt; - wt++; - - tt2 = gg(i, e, f, g) + h + ss1 + *w; - w++; - - d = c; - c = rol32(b, 9); - b = a; - a = tt1; - h = g; - g = rol32(f, 19); - f = e; - e = p0(tt2); - } - - m[0] = a ^ m[0]; - m[1] = b ^ m[1]; - m[2] = c ^ m[2]; - m[3] = d ^ m[3]; - m[4] = e ^ m[4]; - m[5] = f ^ m[5]; - m[6] = g ^ m[6]; - m[7] = h ^ m[7]; - - a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; -} - -static void sm3_transform(struct sm3_state *sst, u8 const *src) -{ - unsigned int w[68]; - unsigned int wt[64]; - - sm3_expand((u32 *)src, w, wt); - sm3_compress(w, wt, sst->state); - - memzero_explicit(w, sizeof(w)); - memzero_explicit(wt, sizeof(wt)); -} - -static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src, - int blocks) -{ - while (blocks--) { - sm3_transform(sst, src); - src += SM3_BLOCK_SIZE; - } -} - -int crypto_sm3_update(struct shash_desc *desc, const u8 *data, +static int crypto_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); -} -EXPORT_SYMBOL(crypto_sm3_update); - -int crypto_sm3_final(struct shash_desc *desc, u8 *out) -{ - sm3_base_do_finalize(desc, sm3_generic_block_fn); - return sm3_base_finish(desc, out); + return 
sm3_base_do_update_blocks(desc, data, len, sm3_block_generic); } -EXPORT_SYMBOL(crypto_sm3_final); -int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, +static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) { - sm3_base_do_update(desc, data, len, sm3_generic_block_fn); - return crypto_sm3_final(desc, hash); + sm3_base_do_finup(desc, data, len, sm3_block_generic); + return sm3_base_finish(desc, hash); } -EXPORT_SYMBOL(crypto_sm3_finup); static struct shash_alg sm3_alg = { .digestsize = SM3_DIGEST_SIZE, .init = sm3_base_init, .update = crypto_sm3_update, - .final = crypto_sm3_final, .finup = crypto_sm3_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .base = { .cra_name = "sm3", .cra_driver_name = "sm3-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SM3_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -189,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void) crypto_unregister_shash(&sm3_alg); } -subsys_initcall(sm3_generic_mod_init); +module_init(sm3_generic_mod_init); module_exit(sm3_generic_mod_fini); MODULE_LICENSE("GPL v2"); diff --git a/crypto/sm4.c b/crypto/sm4.c new file mode 100644 index 000000000000..f4cd7edc11f0 --- /dev/null +++ b/crypto/sm4.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SM4, as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2018 ARM Limited or its affiliates. + * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#include <linux/module.h> +#include <linux/unaligned.h> +#include <crypto/sm4.h> + +static const u32 ____cacheline_aligned fk[4] = { + 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc +}; + +static const u32 ____cacheline_aligned ck[32] = { + 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, + 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9, + 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, + 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, + 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, + 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299, + 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, + 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279 +}; + +static const u8 ____cacheline_aligned sbox[256] = { + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb,
0xdd, 0xbc, 0x7f, + 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 +}; + +extern const u32 crypto_sm4_fk[4] __alias(fk); +extern const u32 crypto_sm4_ck[32] __alias(ck); +extern const u8 crypto_sm4_sbox[256] __alias(sbox); + +EXPORT_SYMBOL(crypto_sm4_fk); +EXPORT_SYMBOL(crypto_sm4_ck); +EXPORT_SYMBOL(crypto_sm4_sbox); + +static inline u32 sm4_t_non_lin_sub(u32 x) +{ + u32 out; + + out = (u32)sbox[x & 0xff]; + out |= (u32)sbox[(x >> 8) & 0xff] << 8; + out |= (u32)sbox[(x >> 16) & 0xff] << 16; + out |= (u32)sbox[(x >> 24) & 0xff] << 24; + + return out; +} + +static inline u32 sm4_key_lin_sub(u32 x) +{ + return x ^ rol32(x, 13) ^ rol32(x, 23); +} + +static inline u32 sm4_enc_lin_sub(u32 x) +{ + return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24); +} + +static inline u32 sm4_key_sub(u32 x) +{ + return sm4_key_lin_sub(sm4_t_non_lin_sub(x)); +} + +static inline u32 sm4_enc_sub(u32 x) +{ + return sm4_enc_lin_sub(sm4_t_non_lin_sub(x)); +} + +static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk) +{ + return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk); +} + + +/** + * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + */ +int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key, + unsigned int key_len) +{ + u32 rk[4]; + const u32 *key = (u32 *)in_key; + int i; + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + rk[0] = get_unaligned_be32(&key[0]) ^ fk[0]; + rk[1] = get_unaligned_be32(&key[1]) ^ fk[1]; + rk[2] = get_unaligned_be32(&key[2]) ^ fk[2]; + rk[3] = get_unaligned_be32(&key[3]) ^ fk[3]; + + for (i = 0; i < 32; i += 4) { + rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]); + rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]); + rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]); + rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]); + + ctx->rkey_enc[i + 0] = rk[0]; + ctx->rkey_enc[i + 1] = rk[1]; + ctx->rkey_enc[i + 2] = rk[2]; + ctx->rkey_enc[i + 3] = rk[3]; + ctx->rkey_dec[31 - 0 - i] = rk[0]; + ctx->rkey_dec[31 - 1 - i] = rk[1]; + ctx->rkey_dec[31 - 2 - i] = rk[2]; + ctx->rkey_dec[31 - 3 - i] = rk[3]; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sm4_expandkey); + +/** + * sm4_crypt_block - Encrypt or decrypt a single SM4 block + * @rk: The rkey_enc for encrypt or rkey_dec for decrypt + * @out: Buffer to store output data + * @in: Buffer containing the input data + */ +void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in) +{ + u32 x[4], i; + + x[0] = get_unaligned_be32(in + 0 * 4); + x[1] = get_unaligned_be32(in + 1 * 4); + x[2] = get_unaligned_be32(in + 2 * 4); + x[3] = get_unaligned_be32(in + 3 * 4); + + for (i = 0; i < 32; i += 4) { + x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]); + x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]); + x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]); + x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]); + } + + put_unaligned_be32(x[3 - 0], out + 0 * 4); + put_unaligned_be32(x[3 - 1], out + 1 * 4); + put_unaligned_be32(x[3 - 2], out + 2 * 4); + 
put_unaligned_be32(x[3 - 3], out + 3 * 4); +} +EXPORT_SYMBOL_GPL(sm4_crypt_block); + +MODULE_DESCRIPTION("Generic SM4 library"); +MODULE_LICENSE("GPL v2"); diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c index 016dbc595705..d57444e8428c 100644 --- a/crypto/sm4_generic.c +++ b/crypto/sm4_generic.c @@ -7,200 +7,52 @@ * All rights reserved. */ +#include <crypto/algapi.h> #include <crypto/sm4.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <asm/byteorder.h> -#include <asm/unaligned.h> - -static const u32 fk[4] = { - 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc -}; - -static const u8 sbox[256] = { - 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, - 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, - 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, - 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, - 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, - 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, - 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, - 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, - 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, - 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, - 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, - 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, - 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, - 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, - 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, - 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, - 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, - 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, - 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, - 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, - 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, - 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, - 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, - 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, - 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, - 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, - 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, - 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, - 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, - 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, - 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, - 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 -}; - -static const u32 ck[] = { - 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, - 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9, - 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, - 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, - 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, - 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299, - 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, - 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279 -}; - -static u32 sm4_t_non_lin_sub(u32 x) -{ - int i; - u8 *b = (u8 *)&x; - - for (i = 0; i < 4; ++i) - b[i] = sbox[b[i]]; - - return x; -} - -static u32 sm4_key_lin_sub(u32 x) -{ - return x ^ rol32(x, 13) ^ rol32(x, 23); - -} - -static u32 sm4_enc_lin_sub(u32 x) -{ - return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24); -} - -static u32 sm4_key_sub(u32 x) -{ - return sm4_key_lin_sub(sm4_t_non_lin_sub(x)); -} - -static u32 sm4_enc_sub(u32 x) -{ - return sm4_enc_lin_sub(sm4_t_non_lin_sub(x)); -} - -static u32 sm4_round(const u32 *x, const u32 rk) -{ - return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk); -} - +#include <linux/unaligned.h> /** - * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016 - * @ctx: The location where the computed key will be stored. 
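
Taken together, the SM4 library routines added in crypto/sm4.c above are used as a pair: expand the key once, then run single 16-byte blocks through the forward or reversed round keys. A minimal usage sketch follows (illustrative only; the helper name is hypothetical, while sm4_expandkey(), sm4_crypt_block(), struct sm4_ctx and the SM4_* constants are the API added by this patch):

#include <crypto/sm4.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Hypothetical round-trip check: encrypt one block, then decrypt it back. */
static int sm4_roundtrip_one_block(const u8 *key, const u8 *pt)
{
	struct sm4_ctx ctx;
	u8 ct[SM4_BLOCK_SIZE], out[SM4_BLOCK_SIZE];
	int err;

	/* Expand once; fails only for key_len != SM4_KEY_SIZE. */
	err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
	if (err)
		return err;

	sm4_crypt_block(ctx.rkey_enc, ct, pt);	/* forward round keys */
	sm4_crypt_block(ctx.rkey_dec, out, ct);	/* reversed round keys */

	return memcmp(out, pt, SM4_BLOCK_SIZE) ? -EINVAL : 0;
}

A single sm4_crypt_block() serves both directions because rkey_dec simply stores the expanded key schedule in reverse order.
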
- * @in_key: The supplied key. - * @key_len: The length of the supplied key. - * - * Returns 0 on success. The function fails only if an invalid key size (or - * pointer) is supplied. - */ -int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key, - unsigned int key_len) -{ - u32 rk[4], t; - const u32 *key = (u32 *)in_key; - int i; - - if (key_len != SM4_KEY_SIZE) - return -EINVAL; - - for (i = 0; i < 4; ++i) - rk[i] = get_unaligned_be32(&key[i]) ^ fk[i]; - - for (i = 0; i < 32; ++i) { - t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]); - ctx->rkey_enc[i] = t; - rk[0] = rk[1]; - rk[1] = rk[2]; - rk[2] = rk[3]; - rk[3] = t; - } - - for (i = 0; i < 32; ++i) - ctx->rkey_dec[i] = ctx->rkey_enc[31 - i]; - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_sm4_expand_key); - -/** - * crypto_sm4_set_key - Set the SM4 key. + * sm4_setkey - Set the SM4 key. * @tfm: The %crypto_tfm that is used in the context. * @in_key: The input key. * @key_len: The size of the key. * - * This function uses crypto_sm4_expand_key() to expand the key. - * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is + * This function uses sm4_expandkey() to expand the key. + * &sm4_ctx _must_ be the private data embedded in @tfm which is * retrieved with crypto_tfm_ctx(). * * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths) */ -int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int sm4_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { - struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); - - return crypto_sm4_expand_key(ctx, in_key, key_len); -} -EXPORT_SYMBOL_GPL(crypto_sm4_set_key); - -static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in) -{ - u32 x[4], i, t; - - for (i = 0; i < 4; ++i) - x[i] = get_unaligned_be32(&in[i]); - - for (i = 0; i < 32; ++i) { - t = sm4_round(x, rk[i]); - x[0] = x[1]; - x[1] = x[2]; - x[2] = x[3]; - x[3] = t; - } + struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); - for (i = 0; i < 4; ++i) - put_unaligned_be32(x[3 - i], &out[i]); + return sm4_expandkey(ctx, in_key, key_len); } /* encrypt a block of text */ -void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); + const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); - sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in); + sm4_crypt_block(ctx->rkey_enc, out, in); } -EXPORT_SYMBOL_GPL(crypto_sm4_encrypt); /* decrypt a block of text */ -void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); + const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); - sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in); + sm4_crypt_block(ctx->rkey_dec, out, in); } -EXPORT_SYMBOL_GPL(crypto_sm4_decrypt); static struct crypto_alg sm4_alg = { .cra_name = "sm4", @@ -208,15 +60,15 @@ static struct crypto_alg sm4_alg = { .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = SM4_KEY_SIZE, .cia_max_keysize = SM4_KEY_SIZE, - .cia_setkey = crypto_sm4_set_key, - .cia_encrypt = crypto_sm4_encrypt, - .cia_decrypt = crypto_sm4_decrypt + .cia_setkey = sm4_setkey, + .cia_encrypt = sm4_encrypt, + .cia_decrypt = 
sm4_decrypt } } }; @@ -231,7 +83,7 @@ static void __exit sm4_fini(void) crypto_unregister_alg(&sm4_alg); } -subsys_initcall(sm4_init); +module_init(sm4_init); module_exit(sm4_fini); MODULE_DESCRIPTION("SM4 Cipher Algorithm"); diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c index dc625ffc54ad..57bbf70f4c22 100644 --- a/crypto/streebog_generic.c +++ b/crypto/streebog_generic.c @@ -13,9 +13,10 @@ */ #include <crypto/internal/hash.h> -#include <linux/module.h> -#include <linux/crypto.h> #include <crypto/streebog.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> static const struct streebog_uint512 buffer0 = { { 0, 0, 0, 0, 0, 0, 0, 0 @@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc) return 0; } -static void streebog_pad(struct streebog_state *ctx) -{ - if (ctx->fillsize >= STREEBOG_BLOCK_SIZE) - return; - - memset(ctx->buffer + ctx->fillsize, 0, - sizeof(ctx->buffer) - ctx->fillsize); - - ctx->buffer[ctx->fillsize] = 1; -} - static void streebog_add512(const struct streebog_uint512 *x, const struct streebog_uint512 *y, struct streebog_uint512 *r) @@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data) streebog_add512(&ctx->Sigma, &m, &ctx->Sigma); } -static void streebog_stage3(struct streebog_state *ctx) +static void streebog_stage3(struct streebog_state *ctx, const u8 *src, + unsigned int len) { struct streebog_uint512 buf = { { 0 } }; + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + } u = {}; - buf.qword[0] = cpu_to_le64(ctx->fillsize << 3); - streebog_pad(ctx); + buf.qword[0] = cpu_to_le64(len << 3); + memcpy(u.buffer, src, len); + u.buffer[len] = 1; - streebog_g(&ctx->h, &ctx->N, &ctx->m); + streebog_g(&ctx->h, &ctx->N, &u.m); streebog_add512(&ctx->N, &buf, &ctx->N); - streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma); + streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma); + memzero_explicit(&u, sizeof(u)); streebog_g(&ctx->h, &buffer0, &ctx->N); streebog_g(&ctx->h, &buffer0, &ctx->Sigma); memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512)); @@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct streebog_state *ctx = shash_desc_ctx(desc); - size_t chunksize; - if (ctx->fillsize) { - chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize; - if (chunksize > len) - chunksize = len; - memcpy(&ctx->buffer[ctx->fillsize], data, chunksize); - ctx->fillsize += chunksize; - len -= chunksize; - data += chunksize; - - if (ctx->fillsize == STREEBOG_BLOCK_SIZE) { - streebog_stage2(ctx, ctx->buffer); - ctx->fillsize = 0; - } - } - - while (len >= STREEBOG_BLOCK_SIZE) { + do { streebog_stage2(ctx, data); data += STREEBOG_BLOCK_SIZE; len -= STREEBOG_BLOCK_SIZE; - } + } while (len >= STREEBOG_BLOCK_SIZE); - if (len) { - memcpy(&ctx->buffer, data, len); - ctx->fillsize = len; - } - return 0; + return len; } -static int streebog_final(struct shash_desc *desc, u8 *digest) +static int streebog_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *digest) { struct streebog_state *ctx = shash_desc_ctx(desc); - streebog_stage3(ctx); - ctx->fillsize = 0; + streebog_stage3(ctx, src, len); if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE) memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE); else @@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { { .digestsize = STREEBOG256_DIGEST_SIZE, .init = streebog_init, .update = streebog_update, - .final = streebog_final, + 
.finup = streebog_finup, .descsize = sizeof(struct streebog_state), .base = { .cra_name = "streebog256", .cra_driver_name = "streebog256-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = STREEBOG_BLOCK_SIZE, .cra_module = THIS_MODULE, }, @@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { { .digestsize = STREEBOG512_DIGEST_SIZE, .init = streebog_init, .update = streebog_update, - .final = streebog_final, + .finup = streebog_finup, .descsize = sizeof(struct streebog_state), .base = { .cra_name = "streebog512", .cra_driver_name = "streebog512-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = STREEBOG_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } -subsys_initcall(streebog_mod_init); +module_init(streebog_mod_init); module_exit(streebog_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index f8d06da78e4f..62fef100e599 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Quick & dirty crypto testing module. + * Quick & dirty crypto benchmarking module. * - * This will only exist until we have a better testing mechanism + * This will only exist until we have a better benchmarking mechanism * (e.g. a char device). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> @@ -25,18 +25,21 @@ #include <linux/err.h> #include <linux/fips.h> #include <linux/init.h> -#include <linux/gfp.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/string.h> -#include <linux/moduleparam.h> -#include <linux/jiffies.h> #include <linux/timex.h> -#include <linux/interrupt.h> + +#include "internal.h" #include "tcrypt.h" /* - * Need slab memory for testing (size in number of pages). + * Need slab memory for benchmarking (size in number of pages). 
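
Both the sm3 and streebog conversions above move partial-block buffering out of the algorithm and into the crypto core, which is what the new CRYPTO_AHASH_ALG_BLOCK_ONLY flag requests. On my reading of the converted code (the contract is not spelled out in this patch): ->update() is only invoked with at least one full block, consumes whole blocks, and returns the number of tail bytes it did not consume; the final partial block is then delivered to ->finup(). This is also why streebog_stage3() now takes (src, len) directly, builds the padded final block in a local union, and wipes it with memzero_explicit(), since it can hold message bytes. A schematic update callback, with hypothetical example_* names:

static int example_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct example_state *st = shash_desc_ctx(desc);

	/* With BLOCK_ONLY, the core guarantees len >= EXAMPLE_BLOCK_SIZE. */
	do {
		example_compress(st, data);	/* absorb one full block */
		data += EXAMPLE_BLOCK_SIZE;
		len -= EXAMPLE_BLOCK_SIZE;
	} while (len >= EXAMPLE_BLOCK_SIZE);

	return len;	/* leftover bytes; the core buffers them for finup */
}
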
*/ #define TVMEMSIZE 4 @@ -58,7 +61,7 @@ */ static unsigned int sec; -static char *alg = NULL; +static char *alg; static u32 type; static u32 mask; static int mode; @@ -66,18 +69,7 @@ static u32 num_mb = 8; static unsigned int klen; static char *tvmem[TVMEMSIZE]; -static const char *check[] = { - "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3", - "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", - "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", - "khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt", - "camellia", "seed", "rmd160", - "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", - "sha3-512", "streebog256", "streebog512", - NULL -}; - -static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 }; +static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 }; static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 }; #define XBUFSIZE 8 @@ -290,6 +282,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs, } ret = crypto_aead_setauthsize(tfm, authsize); + if (ret) { + pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo, + ret); + goto out_free_tfm; + } for (i = 0; i < num_mb; ++i) if (testmgr_alloc_buf(data[i].xbuf)) { @@ -315,7 +312,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs, for (i = 0; i < num_mb; ++i) { data[i].req = aead_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { - pr_err("alg: skcipher: Failed to allocate request for %s\n", + pr_err("alg: aead: Failed to allocate request for %s\n", algo); while (i--) aead_request_free(data[i].req); @@ -330,7 +327,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs, crypto_req_done, &data[i].wait); } - pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, + pr_info("testing speed of multibuffer %s (%s) %s\n", algo, get_driver_name(crypto_aead, tfm), e); i = 0; @@ -512,8 +509,8 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) out: if (ret == 0) - printk("1 operation in %lu cycles (%d bytes)\n", - (cycles + 4) / 8, blen); + pr_cont("1 operation in %lu cycles (%d bytes)\n", + (cycles + 4) / 8, blen); return ret; } @@ -567,16 +564,22 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, sgout = &sg[9]; tfm = crypto_alloc_aead(algo, 0, 0); - if (IS_ERR(tfm)) { pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); goto out_notfm; } + ret = crypto_aead_setauthsize(tfm, authsize); + if (ret) { + pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo, + ret); + goto out_noreq; + } + crypto_init_wait(&wait); - printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, - get_driver_name(crypto_aead, tfm), e); + pr_info("testing speed of %s (%s) %s\n", algo, + get_driver_name(crypto_aead, tfm), e); req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -611,26 +614,24 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, break; } } + ret = crypto_aead_setkey(tfm, key, *keysize); - ret = crypto_aead_setauthsize(tfm, authsize); + if (ret) { + pr_err("setkey() failed flags=%x: %d\n", + crypto_aead_get_flags(tfm), ret); + goto out; + } iv_len = crypto_aead_ivsize(tfm); if (iv_len) memset(iv, 0xff, iv_len); crypto_aead_clear_flags(tfm, ~0); - printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ", - i, *keysize * 8, bs); - + pr_info("test %u (%d bit key, %d byte blocks): ", + i, *keysize * 8, bs); memset(tvmem[0], 0xff, PAGE_SIZE); - if (ret) { - 
pr_err("setkey() failed flags=%x\n", - crypto_aead_get_flags(tfm)); - goto out; - } - sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize), assoc, aad_size); @@ -715,200 +716,6 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) return crypto_wait_req(ret, wait); } -struct test_mb_ahash_data { - struct scatterlist sg[XBUFSIZE]; - char result[64]; - struct ahash_request *req; - struct crypto_wait wait; - char *xbuf[XBUFSIZE]; -}; - -static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb, - int *rc) -{ - int i, err = 0; - - /* Fire up a bunch of concurrent requests */ - for (i = 0; i < num_mb; i++) - rc[i] = crypto_ahash_digest(data[i].req); - - /* Wait for all requests to finish */ - for (i = 0; i < num_mb; i++) { - rc[i] = crypto_wait_req(rc[i], &data[i].wait); - - if (rc[i]) { - pr_info("concurrent request %d error %d\n", i, rc[i]); - err = rc[i]; - } - } - - return err; -} - -static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, - int secs, u32 num_mb) -{ - unsigned long start, end; - int bcount; - int ret = 0; - int *rc; - - rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - for (start = jiffies, end = start + secs * HZ, bcount = 0; - time_before(jiffies, end); bcount++) { - ret = do_mult_ahash_op(data, num_mb, rc); - if (ret) - goto out; - } - - pr_cont("%d operations in %d seconds (%llu bytes)\n", - bcount * num_mb, secs, (u64)bcount * blen * num_mb); - -out: - kfree(rc); - return ret; -} - -static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, - u32 num_mb) -{ - unsigned long cycles = 0; - int ret = 0; - int i; - int *rc; - - rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - /* Warm-up run. */ - for (i = 0; i < 4; i++) { - ret = do_mult_ahash_op(data, num_mb, rc); - if (ret) - goto out; - } - - /* The real thing. */ - for (i = 0; i < 8; i++) { - cycles_t start, end; - - start = get_cycles(); - ret = do_mult_ahash_op(data, num_mb, rc); - end = get_cycles(); - - if (ret) - goto out; - - cycles += end - start; - } - - pr_cont("1 operation in %lu cycles (%d bytes)\n", - (cycles + 4) / (8 * num_mb), blen); - -out: - kfree(rc); - return ret; -} - -static void test_mb_ahash_speed(const char *algo, unsigned int secs, - struct hash_speed *speed, u32 num_mb) -{ - struct test_mb_ahash_data *data; - struct crypto_ahash *tfm; - unsigned int i, j, k; - int ret; - - data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); - if (!data) - return; - - tfm = crypto_alloc_ahash(algo, 0, 0); - if (IS_ERR(tfm)) { - pr_err("failed to load transform for %s: %ld\n", - algo, PTR_ERR(tfm)); - goto free_data; - } - - for (i = 0; i < num_mb; ++i) { - if (testmgr_alloc_buf(data[i].xbuf)) - goto out; - - crypto_init_wait(&data[i].wait); - - data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!data[i].req) { - pr_err("alg: hash: Failed to allocate request for %s\n", - algo); - goto out; - } - - ahash_request_set_callback(data[i].req, 0, crypto_req_done, - &data[i].wait); - - sg_init_table(data[i].sg, XBUFSIZE); - for (j = 0; j < XBUFSIZE; j++) { - sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE); - memset(data[i].xbuf[j], 0xff, PAGE_SIZE); - } - } - - pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, - get_driver_name(crypto_ahash, tfm)); - - for (i = 0; speed[i].blen != 0; i++) { - /* For some reason this only tests digests. 
*/ - if (speed[i].blen != speed[i].plen) - continue; - - if (speed[i].blen > XBUFSIZE * PAGE_SIZE) { - pr_err("template (%u) too big for tvmem (%lu)\n", - speed[i].blen, XBUFSIZE * PAGE_SIZE); - goto out; - } - - if (klen) - crypto_ahash_setkey(tfm, tvmem[0], klen); - - for (k = 0; k < num_mb; k++) - ahash_request_set_crypt(data[k].req, data[k].sg, - data[k].result, speed[i].blen); - - pr_info("test%3u " - "(%5u byte blocks,%5u bytes per update,%4u updates): ", - i, speed[i].blen, speed[i].plen, - speed[i].blen / speed[i].plen); - - if (secs) { - ret = test_mb_ahash_jiffies(data, speed[i].blen, secs, - num_mb); - cond_resched(); - } else { - ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb); - } - - - if (ret) { - pr_err("At least one hashing failed ret=%d\n", ret); - break; - } - } - -out: - for (k = 0; k < num_mb; ++k) - ahash_request_free(data[k].req); - - for (k = 0; k < num_mb; ++k) - testmgr_free_buf(data[k].xbuf); - - crypto_free_ahash(tfm); - -free_data: - kfree(data); -} - static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, char *out, int secs) { @@ -923,8 +730,8 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, return ret; } - printk("%6u opers/sec, %9lu bytes/sec\n", - bcount / secs, ((long)bcount * blen) / secs); + pr_cont("%6u opers/sec, %9lu bytes/sec\n", + bcount / secs, ((long)bcount * blen) / secs); return 0; } @@ -1073,8 +880,8 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, return; } - printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo, - get_driver_name(crypto_ahash, tfm)); + pr_info("testing speed of async %s (%s)\n", algo, + get_driver_name(crypto_ahash, tfm)); if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) { pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm), @@ -1286,15 +1093,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, goto out_free_tfm; } - - for (i = 0; i < num_mb; ++i) - if (testmgr_alloc_buf(data[i].xbuf)) { - while (i--) - testmgr_free_buf(data[i].xbuf); - goto out_free_tfm; - } - - for (i = 0; i < num_mb; ++i) { data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { @@ -1313,7 +1111,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, crypto_init_wait(&data[i].wait); } - pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, + pr_info("testing speed of multibuffer %s (%s) %s\n", algo, get_driver_name(crypto_skcipher, tfm), e); i = 0; @@ -1324,7 +1122,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, if (bs > XBUFSIZE * PAGE_SIZE) { pr_err("template (%u) too big for buffer (%lu)\n", - *b_size, XBUFSIZE * PAGE_SIZE); + bs, XBUFSIZE * PAGE_SIZE); goto out; } @@ -1377,8 +1175,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, memset(cur->xbuf[p], 0xff, k); skcipher_request_set_crypt(cur->req, cur->sg, - cur->sg, *b_size, - iv); + cur->sg, bs, iv); } if (secs) { @@ -1521,13 +1318,12 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, return; } - pr_info("\ntesting speed of %s %s (%s) %s\n", async ? "async" : "sync", + pr_info("testing speed of %s %s (%s) %s\n", async ? 
"async" : "sync", algo, get_driver_name(crypto_skcipher, tfm), e); req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { - pr_err("tcrypt: skcipher: Failed to allocate request for %s\n", - algo); + pr_err("skcipher: Failed to allocate request for %s\n", algo); goto out; } @@ -1640,18 +1436,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int secs, false); } -static void test_available(void) -{ - const char **name = check; - - while (*name) { - printk("alg %s ", *name); - printk(crypto_has_alg(*name, 0, 0) ? - "found\n" : "not found\n"); - name++; - } -} - static inline int tcrypt_test(const char *alg) { int ret; @@ -1659,8 +1443,8 @@ static inline int tcrypt_test(const char *alg) pr_debug("testing %s\n", alg); ret = alg_test(alg, alg, 0, 0); - /* non-fips algs return -EINVAL in fips mode */ - if (fips_enabled && ret == -EINVAL) + /* non-fips algs return -EINVAL or -ECANCELED in fips mode */ + if (fips_enabled && (ret == -EINVAL || ret == -ECANCELED)) ret = 0; return ret; } @@ -1680,358 +1464,379 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) } for (i = 1; i < 200; i++) - ret += do_test(NULL, 0, 0, i, num_mb); + ret = min(ret, do_test(NULL, 0, 0, i, num_mb)); break; case 1: - ret += tcrypt_test("md5"); + ret = min(ret, tcrypt_test("md5")); break; case 2: - ret += tcrypt_test("sha1"); + ret = min(ret, tcrypt_test("sha1")); break; case 3: - ret += tcrypt_test("ecb(des)"); - ret += tcrypt_test("cbc(des)"); - ret += tcrypt_test("ctr(des)"); + ret = min(ret, tcrypt_test("ecb(des)")); + ret = min(ret, tcrypt_test("cbc(des)")); + ret = min(ret, tcrypt_test("ctr(des)")); break; case 4: - ret += tcrypt_test("ecb(des3_ede)"); - ret += tcrypt_test("cbc(des3_ede)"); - ret += tcrypt_test("ctr(des3_ede)"); + ret = min(ret, tcrypt_test("ecb(des3_ede)")); + ret = min(ret, tcrypt_test("cbc(des3_ede)")); + ret = min(ret, tcrypt_test("ctr(des3_ede)")); break; case 5: - ret += tcrypt_test("md4"); + ret = min(ret, tcrypt_test("md4")); break; case 6: - ret += tcrypt_test("sha256"); + ret = min(ret, tcrypt_test("sha256")); break; case 7: - ret += tcrypt_test("ecb(blowfish)"); - ret += tcrypt_test("cbc(blowfish)"); - ret += tcrypt_test("ctr(blowfish)"); + ret = min(ret, tcrypt_test("ecb(blowfish)")); + ret = min(ret, tcrypt_test("cbc(blowfish)")); + ret = min(ret, tcrypt_test("ctr(blowfish)")); break; case 8: - ret += tcrypt_test("ecb(twofish)"); - ret += tcrypt_test("cbc(twofish)"); - ret += tcrypt_test("ctr(twofish)"); - ret += tcrypt_test("lrw(twofish)"); - ret += tcrypt_test("xts(twofish)"); + ret = min(ret, tcrypt_test("ecb(twofish)")); + ret = min(ret, tcrypt_test("cbc(twofish)")); + ret = min(ret, tcrypt_test("ctr(twofish)")); + ret = min(ret, tcrypt_test("lrw(twofish)")); + ret = min(ret, tcrypt_test("xts(twofish)")); break; case 9: - ret += tcrypt_test("ecb(serpent)"); - ret += tcrypt_test("cbc(serpent)"); - ret += tcrypt_test("ctr(serpent)"); - ret += tcrypt_test("lrw(serpent)"); - ret += tcrypt_test("xts(serpent)"); + ret = min(ret, tcrypt_test("ecb(serpent)")); + ret = min(ret, tcrypt_test("cbc(serpent)")); + ret = min(ret, tcrypt_test("ctr(serpent)")); + ret = min(ret, tcrypt_test("lrw(serpent)")); + ret = min(ret, tcrypt_test("xts(serpent)")); break; case 10: - ret += tcrypt_test("ecb(aes)"); - ret += tcrypt_test("cbc(aes)"); - ret += tcrypt_test("lrw(aes)"); - ret += tcrypt_test("xts(aes)"); - ret += tcrypt_test("ctr(aes)"); - ret += tcrypt_test("rfc3686(ctr(aes))"); - ret += tcrypt_test("ofb(aes)"); - ret += tcrypt_test("cfb(aes)"); + ret = 
min(ret, tcrypt_test("ecb(aes)")); + ret = min(ret, tcrypt_test("cbc(aes)")); + ret = min(ret, tcrypt_test("lrw(aes)")); + ret = min(ret, tcrypt_test("xts(aes)")); + ret = min(ret, tcrypt_test("ctr(aes)")); + ret = min(ret, tcrypt_test("rfc3686(ctr(aes))")); + ret = min(ret, tcrypt_test("xctr(aes)")); break; case 11: - ret += tcrypt_test("sha384"); + ret = min(ret, tcrypt_test("sha384")); break; case 12: - ret += tcrypt_test("sha512"); + ret = min(ret, tcrypt_test("sha512")); break; case 13: - ret += tcrypt_test("deflate"); + ret = min(ret, tcrypt_test("deflate")); break; case 14: - ret += tcrypt_test("ecb(cast5)"); - ret += tcrypt_test("cbc(cast5)"); - ret += tcrypt_test("ctr(cast5)"); + ret = min(ret, tcrypt_test("ecb(cast5)")); + ret = min(ret, tcrypt_test("cbc(cast5)")); + ret = min(ret, tcrypt_test("ctr(cast5)")); break; case 15: - ret += tcrypt_test("ecb(cast6)"); - ret += tcrypt_test("cbc(cast6)"); - ret += tcrypt_test("ctr(cast6)"); - ret += tcrypt_test("lrw(cast6)"); - ret += tcrypt_test("xts(cast6)"); + ret = min(ret, tcrypt_test("ecb(cast6)")); + ret = min(ret, tcrypt_test("cbc(cast6)")); + ret = min(ret, tcrypt_test("ctr(cast6)")); + ret = min(ret, tcrypt_test("lrw(cast6)")); + ret = min(ret, tcrypt_test("xts(cast6)")); break; case 16: - ret += tcrypt_test("ecb(arc4)"); + ret = min(ret, tcrypt_test("ecb(arc4)")); break; case 17: - ret += tcrypt_test("michael_mic"); + ret = min(ret, tcrypt_test("michael_mic")); break; case 18: - ret += tcrypt_test("crc32c"); + ret = min(ret, tcrypt_test("crc32c")); break; case 19: - ret += tcrypt_test("ecb(tea)"); + ret = min(ret, tcrypt_test("ecb(tea)")); break; case 20: - ret += tcrypt_test("ecb(xtea)"); + ret = min(ret, tcrypt_test("ecb(xtea)")); break; case 21: - ret += tcrypt_test("ecb(khazad)"); + ret = min(ret, tcrypt_test("ecb(khazad)")); break; case 22: - ret += tcrypt_test("wp512"); + ret = min(ret, tcrypt_test("wp512")); break; case 23: - ret += tcrypt_test("wp384"); + ret = min(ret, tcrypt_test("wp384")); break; case 24: - ret += tcrypt_test("wp256"); + ret = min(ret, tcrypt_test("wp256")); break; case 26: - ret += tcrypt_test("ecb(anubis)"); - ret += tcrypt_test("cbc(anubis)"); + ret = min(ret, tcrypt_test("ecb(anubis)")); + ret = min(ret, tcrypt_test("cbc(anubis)")); break; case 30: - ret += tcrypt_test("ecb(xeta)"); + ret = min(ret, tcrypt_test("ecb(xeta)")); break; case 31: - ret += tcrypt_test("pcbc(fcrypt)"); + ret = min(ret, tcrypt_test("pcbc(fcrypt)")); break; case 32: - ret += tcrypt_test("ecb(camellia)"); - ret += tcrypt_test("cbc(camellia)"); - ret += tcrypt_test("ctr(camellia)"); - ret += tcrypt_test("lrw(camellia)"); - ret += tcrypt_test("xts(camellia)"); + ret = min(ret, tcrypt_test("ecb(camellia)")); + ret = min(ret, tcrypt_test("cbc(camellia)")); + ret = min(ret, tcrypt_test("ctr(camellia)")); + ret = min(ret, tcrypt_test("lrw(camellia)")); + ret = min(ret, tcrypt_test("xts(camellia)")); break; case 33: - ret += tcrypt_test("sha224"); + ret = min(ret, tcrypt_test("sha224")); break; case 35: - ret += tcrypt_test("gcm(aes)"); + ret = min(ret, tcrypt_test("gcm(aes)")); break; case 36: - ret += tcrypt_test("lzo"); + ret = min(ret, tcrypt_test("lzo")); break; case 37: - ret += tcrypt_test("ccm(aes)"); + ret = min(ret, tcrypt_test("ccm(aes)")); break; case 38: - ret += tcrypt_test("cts(cbc(aes))"); + ret = min(ret, tcrypt_test("cts(cbc(aes))")); break; case 39: - ret += tcrypt_test("xxhash64"); + ret = min(ret, tcrypt_test("xxhash64")); break; case 40: - ret += tcrypt_test("rmd160"); - break; - - case 41: - ret += 
tcrypt_test("blake2s-256"); + ret = min(ret, tcrypt_test("rmd160")); break; case 42: - ret += tcrypt_test("blake2b-512"); + ret = min(ret, tcrypt_test("blake2b-512")); break; case 43: - ret += tcrypt_test("ecb(seed)"); + ret = min(ret, tcrypt_test("ecb(seed)")); break; case 45: - ret += tcrypt_test("rfc4309(ccm(aes))"); + ret = min(ret, tcrypt_test("rfc4309(ccm(aes))")); break; case 46: - ret += tcrypt_test("ghash"); - break; - - case 47: - ret += tcrypt_test("crct10dif"); + ret = min(ret, tcrypt_test("ghash")); break; case 48: - ret += tcrypt_test("sha3-224"); + ret = min(ret, tcrypt_test("sha3-224")); break; case 49: - ret += tcrypt_test("sha3-256"); + ret = min(ret, tcrypt_test("sha3-256")); break; case 50: - ret += tcrypt_test("sha3-384"); + ret = min(ret, tcrypt_test("sha3-384")); break; case 51: - ret += tcrypt_test("sha3-512"); + ret = min(ret, tcrypt_test("sha3-512")); break; case 52: - ret += tcrypt_test("sm3"); + ret = min(ret, tcrypt_test("sm3")); break; case 53: - ret += tcrypt_test("streebog256"); + ret = min(ret, tcrypt_test("streebog256")); break; case 54: - ret += tcrypt_test("streebog512"); + ret = min(ret, tcrypt_test("streebog512")); + break; + + case 55: + ret = min(ret, tcrypt_test("gcm(sm4)")); + break; + + case 56: + ret = min(ret, tcrypt_test("ccm(sm4)")); + break; + + case 58: + ret = min(ret, tcrypt_test("gcm(aria)")); + break; + + case 59: + ret = min(ret, tcrypt_test("cts(cbc(sm4))")); break; case 100: - ret += tcrypt_test("hmac(md5)"); + ret = min(ret, tcrypt_test("hmac(md5)")); break; case 101: - ret += tcrypt_test("hmac(sha1)"); + ret = min(ret, tcrypt_test("hmac(sha1)")); break; case 102: - ret += tcrypt_test("hmac(sha256)"); + ret = min(ret, tcrypt_test("hmac(sha256)")); break; case 103: - ret += tcrypt_test("hmac(sha384)"); + ret = min(ret, tcrypt_test("hmac(sha384)")); break; case 104: - ret += tcrypt_test("hmac(sha512)"); + ret = min(ret, tcrypt_test("hmac(sha512)")); break; case 105: - ret += tcrypt_test("hmac(sha224)"); + ret = min(ret, tcrypt_test("hmac(sha224)")); break; case 106: - ret += tcrypt_test("xcbc(aes)"); + ret = min(ret, tcrypt_test("xcbc(aes)")); break; case 108: - ret += tcrypt_test("hmac(rmd160)"); - break; - - case 109: - ret += tcrypt_test("vmac64(aes)"); + ret = min(ret, tcrypt_test("hmac(rmd160)")); break; case 111: - ret += tcrypt_test("hmac(sha3-224)"); + ret = min(ret, tcrypt_test("hmac(sha3-224)")); break; case 112: - ret += tcrypt_test("hmac(sha3-256)"); + ret = min(ret, tcrypt_test("hmac(sha3-256)")); break; case 113: - ret += tcrypt_test("hmac(sha3-384)"); + ret = min(ret, tcrypt_test("hmac(sha3-384)")); break; case 114: - ret += tcrypt_test("hmac(sha3-512)"); + ret = min(ret, tcrypt_test("hmac(sha3-512)")); break; case 115: - ret += tcrypt_test("hmac(streebog256)"); + ret = min(ret, tcrypt_test("hmac(streebog256)")); break; case 116: - ret += tcrypt_test("hmac(streebog512)"); - break; - - case 150: - ret += tcrypt_test("ansi_cprng"); + ret = min(ret, tcrypt_test("hmac(streebog512)")); break; case 151: - ret += tcrypt_test("rfc4106(gcm(aes))"); + ret = min(ret, tcrypt_test("rfc4106(gcm(aes))")); break; case 152: - ret += tcrypt_test("rfc4543(gcm(aes))"); + ret = min(ret, tcrypt_test("rfc4543(gcm(aes))")); break; case 153: - ret += tcrypt_test("cmac(aes)"); + ret = min(ret, tcrypt_test("cmac(aes)")); break; case 154: - ret += tcrypt_test("cmac(des3_ede)"); + ret = min(ret, tcrypt_test("cmac(des3_ede)")); break; case 155: - ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))"); + ret = min(ret, 
tcrypt_test("authenc(hmac(sha1),cbc(aes))")); break; case 156: - ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"); + ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))")); break; case 157: - ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))")); + break; + + case 158: + ret = min(ret, tcrypt_test("cbcmac(sm4)")); + break; + + case 159: + ret = min(ret, tcrypt_test("cmac(sm4)")); + break; + + case 160: + ret = min(ret, tcrypt_test("xcbc(sm4)")); break; + case 181: - ret += tcrypt_test("authenc(hmac(sha1),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))")); break; case 182: - ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))")); break; case 183: - ret += tcrypt_test("authenc(hmac(sha224),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))")); break; case 184: - ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))")); break; case 185: - ret += tcrypt_test("authenc(hmac(sha256),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))")); break; case 186: - ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))")); break; case 187: - ret += tcrypt_test("authenc(hmac(sha384),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))")); break; case 188: - ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))")); break; case 189: - ret += tcrypt_test("authenc(hmac(sha512),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))")); break; case 190: - ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))")); break; case 191: - ret += tcrypt_test("ecb(sm4)"); - ret += tcrypt_test("cbc(sm4)"); - ret += tcrypt_test("ctr(sm4)"); + ret = min(ret, tcrypt_test("ecb(sm4)")); + ret = min(ret, tcrypt_test("cbc(sm4)")); + ret = min(ret, tcrypt_test("ctr(sm4)")); + ret = min(ret, tcrypt_test("xts(sm4)")); + break; + case 192: + ret = min(ret, tcrypt_test("ecb(aria)")); + ret = min(ret, tcrypt_test("cbc(aria)")); + ret = min(ret, tcrypt_test("ctr(aria)")); + break; + case 193: + ret = min(ret, tcrypt_test("ffdhe2048(dh)")); break; case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, @@ -2058,10 +1863,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16_24_32); test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, speed_template_16_24_32); - test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, - speed_template_16_24_32); - test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, - speed_template_16_24_32); break; case 201: @@ -2225,11 +2026,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) case 211: test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, - NULL, 0, 16, 16, aead_speed_template_20); + NULL, 0, 16, 16, aead_speed_template_20_28_36); test_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8, speed_template_16_24_32); test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, - NULL, 0, 16, 16, aead_speed_template_20); + NULL, 0, 16, 16, aead_speed_template_20_28_36); test_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8, speed_template_16_24_32); break; @@ -2255,11 +2056,11 @@ static int 
do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) case 215: test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, - 0, 16, 16, aead_speed_template_20, num_mb); + 0, 16, 16, aead_speed_template_20_28_36, num_mb); test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8, speed_template_16_24_32, num_mb); test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL, - 0, 16, 16, aead_speed_template_20, num_mb); + 0, 16, 16, aead_speed_template_20_28_36, num_mb); test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8, speed_template_16_24_32, num_mb); break; @@ -2289,10 +2090,18 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16); test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0, speed_template_16); + test_cipher_speed("cts(cbc(sm4))", ENCRYPT, sec, NULL, 0, + speed_template_16); + test_cipher_speed("cts(cbc(sm4))", DECRYPT, sec, NULL, 0, + speed_template_16); test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0, speed_template_16); test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0, speed_template_16); + test_cipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_32); + test_cipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0, + speed_template_32); break; case 219: @@ -2322,6 +2131,68 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) NULL, 0, 16, 8, speed_template_16); break; + case 222: + test_aead_speed("gcm(sm4)", ENCRYPT, sec, + NULL, 0, 16, 8, speed_template_16); + test_aead_speed("gcm(sm4)", DECRYPT, sec, + NULL, 0, 16, 8, speed_template_16); + break; + + case 223: + test_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec, + NULL, 0, 16, 16, aead_speed_template_19); + test_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec, + NULL, 0, 16, 16, aead_speed_template_19); + break; + + case 224: + test_mb_aead_speed("gcm(sm4)", ENCRYPT, sec, NULL, 0, 16, 8, + speed_template_16, num_mb); + test_mb_aead_speed("gcm(sm4)", DECRYPT, sec, NULL, 0, 16, 8, + speed_template_16, num_mb); + break; + + case 225: + test_mb_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec, NULL, 0, + 16, 16, aead_speed_template_19, num_mb); + test_mb_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec, NULL, 0, + 16, 16, aead_speed_template_19, num_mb); + break; + + case 226: + test_cipher_speed("hctr2(aes)", ENCRYPT, sec, NULL, + 0, speed_template_32); + break; + + case 227: + test_cipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cbc(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cbc(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + break; + + case 228: + test_aead_speed("gcm(aria)", ENCRYPT, sec, + NULL, 0, 16, 8, speed_template_16_24_32); + test_aead_speed("gcm(aria)", DECRYPT, sec, + NULL, 0, 16, 8, speed_template_16_24_32); + break; + + case 229: + test_mb_aead_speed("gcm(aria)", ENCRYPT, sec, NULL, 0, 16, 8, + speed_template_16, num_mb); + test_mb_aead_speed("gcm(aria)", DECRYPT, sec, NULL, 0, 16, 8, + speed_template_16, num_mb); + break; + case 300: if (alg) { test_hash_speed(alg, sec, generic_hash_speed_template); @@ -2376,10 +2247,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 
400) break; fallthrough; - case 316: - test_hash_speed("blake2s-256", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 317: test_hash_speed("blake2b-512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; @@ -2393,14 +2260,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 320: - test_hash_speed("crct10dif", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; - case 321: - test_hash_speed("poly1305", sec, poly1305_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 322: test_hash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; @@ -2488,10 +2347,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 416: - test_ahash_speed("blake2s-256", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; case 417: test_ahash_speed("blake2b-512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; @@ -2513,33 +2368,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) if (mode > 400 && mode < 500) break; fallthrough; case 422: - test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 423: - test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 424: - test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 425: - test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 426: - test_mb_ahash_speed("streebog256", sec, - generic_hash_speed_template, num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 427: - test_mb_ahash_speed("streebog512", sec, - generic_hash_speed_template, num_mb); + test_ahash_speed("sm3", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; case 499: @@ -2570,14 +2399,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16_24_32); test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, speed_template_16_24_32); - test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, - speed_template_16_24_32); - test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, - speed_template_16_24_32); - test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0, - speed_template_16_24_32); - test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, - speed_template_16_24_32); test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0, speed_template_20_28_36); test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0, @@ -2597,18 +2418,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_acipher_speed("cbc(des3_ede)", DECRYPT, sec, des3_speed_template, DES3_SPEED_VECTORS, speed_template_24); - test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24); - test_acipher_speed("cfb(des3_ede)", DECRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24); - 
test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24); - test_acipher_speed("ofb(des3_ede)", DECRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24); break; case 502: @@ -2620,14 +2429,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_8); test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, speed_template_8); - test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0, - speed_template_8); - test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0, - speed_template_8); - test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0, - speed_template_8); - test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0, - speed_template_8); break; case 503: @@ -2757,7 +2558,46 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_8_32); break; + case 518: + test_acipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_16); + test_acipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0, + speed_template_16); + test_acipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_16); + test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0, + speed_template_16); + test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_16); + test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0, + speed_template_16); + test_acipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_32); + test_acipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0, + speed_template_32); + break; + + case 519: + test_acipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_acipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_acipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_acipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + break; + case 600: + if (alg) { + u8 speed_template[2] = {klen, 0}; + test_mb_skcipher_speed(alg, ENCRYPT, sec, NULL, 0, + speed_template, num_mb); + test_mb_skcipher_speed(alg, DECRYPT, sec, NULL, 0, + speed_template, num_mb); + break; + } + test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32, num_mb); test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, @@ -2782,14 +2622,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16_24_32, num_mb); test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, speed_template_16_24_32, num_mb); - test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, - speed_template_16_24_32, num_mb); - test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, - speed_template_16_24_32, num_mb); - test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0, - speed_template_16_24_32, num_mb); - test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, - speed_template_16_24_32, num_mb); test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0, speed_template_20_28_36, num_mb); test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, @@ -2809,18 +2641,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec, des3_speed_template, DES3_SPEED_VECTORS, speed_template_24, num_mb); - test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24, num_mb); - test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24, num_mb); - 
test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24, num_mb); - test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec, - des3_speed_template, DES3_SPEED_VECTORS, - speed_template_24, num_mb); break; case 602: @@ -2832,14 +2652,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_8, num_mb); test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, speed_template_8, num_mb); - test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0, - speed_template_8, num_mb); - test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0, - speed_template_8, num_mb); - test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0, - speed_template_8, num_mb); - test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0, - speed_template_8, num_mb); break; case 603: @@ -2969,9 +2781,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_8_32, num_mb); break; - case 1000: - test_available(); + case 610: + test_mb_skcipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); break; + } return ret; @@ -2991,7 +2811,7 @@ static int __init tcrypt_mod_init(void) err = do_test(alg, type, mask, mode, num_mb); if (err) { - printk(KERN_ERR "tcrypt: one or more tests failed!\n"); + pr_err("one or more tests failed!\n"); goto err_free_tv; } else { pr_debug("all tests passed\n"); @@ -3036,5 +2856,5 @@ module_param(klen, uint, 0); MODULE_PARM_DESC(klen, "Key length (defaults to 0)"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Quick & dirty crypto testing module"); +MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 9f654677172a..85c3f77bcfb4 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Quick & dirty crypto testing module. + * Quick & dirty crypto benchmarking module. * - * This will only exist until we have a better testing mechanism + * This will only exist until we have a better benchmarking mechanism * (e.g. a char device). 
* * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> @@ -62,7 +62,7 @@ static u8 speed_template_32[] = {32, 0}; * AEAD speed tests */ static u8 aead_speed_template_19[] = {19, 0}; -static u8 aead_speed_template_20[] = {20, 0}; +static u8 aead_speed_template_20_28_36[] = {20, 28, 36, 0}; static u8 aead_speed_template_36[] = {36, 0}; /* @@ -96,22 +96,4 @@ static struct hash_speed generic_hash_speed_template[] = { { .blen = 0, .plen = 0, } }; -static struct hash_speed poly1305_speed_template[] = { - { .blen = 96, .plen = 16, }, - { .blen = 96, .plen = 32, }, - { .blen = 96, .plen = 96, }, - { .blen = 288, .plen = 16, }, - { .blen = 288, .plen = 32, }, - { .blen = 288, .plen = 288, }, - { .blen = 1056, .plen = 32, }, - { .blen = 1056, .plen = 1056, }, - { .blen = 2080, .plen = 32, }, - { .blen = 2080, .plen = 2080, }, - { .blen = 4128, .plen = 4128, }, - { .blen = 8224, .plen = 8224, }, - - /* End marker */ - { .blen = 0, .plen = 0, } -}; - #endif /* _CRYPTO_TCRYPT_H */ diff --git a/crypto/tea.c b/crypto/tea.c index 02efc5d81690..cb05140e3470 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -14,11 +14,11 @@ * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com */ +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> -#include <linux/crypto.h> +#include <linux/unaligned.h> #include <linux/types.h> #define TEA_KEY_SIZE 16 @@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *key = (const __le32 *)in_key; - ctx->KEY[0] = le32_to_cpu(key[0]); - ctx->KEY[1] = le32_to_cpu(key[1]); - ctx->KEY[2] = le32_to_cpu(key[2]); - ctx->KEY[3] = le32_to_cpu(key[3]); + ctx->KEY[0] = get_unaligned_le32(&in_key[0]); + ctx->KEY[1] = get_unaligned_le32(&in_key[4]); + ctx->KEY[2] = get_unaligned_le32(&in_key[8]); + ctx->KEY[3] = get_unaligned_le32(&in_key[12]); return 0; @@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, n, sum = 0; u32 k0, k1, k2, k3; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); k0 = ctx->KEY[0]; k1 = ctx->KEY[1]; @@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, n, sum; u32 k0, k1, k2, k3; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); k0 = ctx->KEY[0]; k1 = ctx->KEY[1]; @@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) sum -= TEA_DELTA; } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *key = (const __le32 *)in_key; - ctx->KEY[0] = 
le32_to_cpu(key[0]); - ctx->KEY[1] = le32_to_cpu(key[1]); - ctx->KEY[2] = le32_to_cpu(key[2]); - ctx->KEY[3] = le32_to_cpu(key[3]); + ctx->KEY[0] = get_unaligned_le32(&in_key[0]); + ctx->KEY[1] = get_unaligned_le32(&in_key[4]); + ctx->KEY[2] = get_unaligned_le32(&in_key[8]); + ctx->KEY[3] = get_unaligned_le32(&in_key[12]); return 0; @@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); while (sum != limit) { y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); @@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); sum = XTEA_DELTA * XTEA_ROUNDS; @@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } @@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); while (sum != limit) { y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; @@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); sum = XTEA_DELTA * XTEA_ROUNDS; @@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static struct crypto_alg tea_algs[3] = { { @@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = TEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct tea_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = TEA_KEY_SIZE, @@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = 
XTEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct xtea_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = XTEA_KEY_SIZE, @@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = XTEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct xtea_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = XTEA_KEY_SIZE, @@ -272,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea"); MODULE_ALIAS_CRYPTO("xtea"); MODULE_ALIAS_CRYPTO("xeta"); -subsys_initcall(tea_mod_init); +module_init(tea_mod_init); module_exit(tea_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c978e41f11a1..a302be53896d 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -23,7 +23,7 @@ #include <linux/fips.h> #include <linux/module.h> #include <linux/once.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> @@ -33,34 +33,32 @@ #include <crypto/akcipher.h> #include <crypto/kpp.h> #include <crypto/acompress.h> +#include <crypto/sig.h> #include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> #include "internal.h" -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); static bool notests; module_param(notests, bool, 0644); -MODULE_PARM_DESC(notests, "disable crypto self-tests"); +MODULE_PARM_DESC(notests, "disable all crypto self-tests"); -static bool panic_on_fail; -module_param(panic_on_fail, bool, 0444); - -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS -static bool noextratests; -module_param(noextratests, bool, 0644); -MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests"); +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL +static bool noslowtests; +module_param(noslowtests, bool, 0644); +MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests"); static unsigned int fuzz_iterations = 100; module_param(fuzz_iterations, uint, 0644); MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations"); - -DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test); -EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test); +#else +#define noslowtests 1 +#define fuzz_iterations 0 #endif -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifndef CONFIG_CRYPTO_SELFTESTS /* a perfect nop */ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) @@ -119,11 +117,6 @@ struct hash_test_suite { unsigned int count; }; -struct cprng_test_suite { - const struct cprng_testvec *vecs; - unsigned int count; -}; - struct drbg_test_suite { const struct drbg_testvec *vecs; unsigned int count; @@ -134,6 +127,11 @@ struct akcipher_test_suite { unsigned int count; }; +struct sig_test_suite { + const struct sig_testvec *vecs; + unsigned int count; +}; + struct kpp_test_suite { const struct kpp_testvec *vecs; unsigned int count; @@ -151,9 +149,9 @@ struct alg_test_desc { struct cipher_test_suite cipher; struct comp_test_suite comp; struct hash_test_suite hash; - struct cprng_test_suite cprng; struct drbg_test_suite drbg; struct akcipher_test_suite akcipher; + struct sig_test_suite sig; struct kpp_test_suite kpp; } suite; }; @@ -235,6 +233,20 @@ enum finalization_type { FINALIZATION_TYPE_DIGEST, /* use digest() */ }; +/* + * Whether the crypto operation will occur in-place, and if so whether the + * source and destination scatterlist pointers will coincide (req->src == + * req->dst), or whether they'll merely point to two separate scatterlists + * (req->src != req->dst) that 
reference the same underlying memory. + * + * This is only relevant for algorithm types that support in-place operation. + */ +enum inplace_mode { + OUT_OF_PLACE, + INPLACE_ONE_SGLIST, + INPLACE_TWO_SGLISTS, +}; + #define TEST_SG_TOTAL 10000 /** @@ -268,7 +280,7 @@ struct test_sg_division { * crypto test vector can be tested. * * @name: name of this config, logged for debugging purposes if a test fails - * @inplace: operate on the data in-place, if applicable for the algorithm type? + * @inplace_mode: whether and how to operate on the data in-place, if applicable * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP * @src_divs: description of how to arrange the source scatterlist * @dst_divs: description of how to arrange the dst scatterlist, if applicable @@ -282,10 +294,14 @@ struct test_sg_division { * the @key_offset * @finalization_type: what finalization function to use for hashes * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP. + * This applies to the parts of the operation that aren't controlled + * individually by @nosimd_setkey or @src_divs[].nosimd. + * @nosimd_setkey: set the key (if applicable) with SIMD disabled? Requires + * !CRYPTO_TFM_REQ_MAY_SLEEP. */ struct testvec_config { const char *name; - bool inplace; + enum inplace_mode inplace_mode; u32 req_flags; struct test_sg_division src_divs[XBUFSIZE]; struct test_sg_division dst_divs[XBUFSIZE]; @@ -295,26 +311,31 @@ struct testvec_config { bool key_offset_relative_to_alignmask; enum finalization_type finalization_type; bool nosimd; + bool nosimd_setkey; }; #define TESTVEC_CONFIG_NAMELEN 192 /* * The following are the lists of testvec_configs to test for each algorithm - * type when the basic crypto self-tests are enabled, i.e. when - * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test - * coverage, while keeping the test time much shorter than the full fuzz tests - * so that the basic tests can be enabled in a wider range of circumstances. + * type when the "fast" crypto self-tests are enabled. They aim to provide good + * test coverage, while keeping the test time much shorter than the "full" tests + * so that the "fast" tests can be enabled in a wider range of circumstances. 
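For context on the new enum just above: INPLACE_ONE_SGLIST passes one scatterlist as both source and destination, while INPLACE_TWO_SGLISTS builds two distinct scatterlists that describe the same memory. A minimal sketch using the stock <linux/scatterlist.h> and <crypto/skcipher.h> APIs; the buffer, request, and IV here are assumptions for illustration, not code from the patch:

	static u8 example_buf[256];	/* scatterlists need linearly-mapped data */

	static void example_inplace_modes(struct skcipher_request *req, u8 *iv)
	{
		struct scatterlist src, dst;

		sg_init_one(&src, example_buf, sizeof(example_buf));

		/* INPLACE_ONE_SGLIST: req->src == req->dst */
		skcipher_request_set_crypt(req, &src, &src,
					   sizeof(example_buf), iv);

		/* INPLACE_TWO_SGLISTS: req->src != req->dst, same memory */
		sg_init_one(&dst, example_buf, sizeof(example_buf));
		skcipher_request_set_crypt(req, &src, &dst,
					   sizeof(example_buf), iv);
	}
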
*/ /* Configs for skciphers and aeads */ static const struct testvec_config default_cipher_testvec_configs[] = { { - .name = "in-place", - .inplace = true, + .name = "in-place (one sglist)", + .inplace_mode = INPLACE_ONE_SGLIST, + .src_divs = { { .proportion_of_total = 10000 } }, + }, { + .name = "in-place (two sglists)", + .inplace_mode = INPLACE_TWO_SGLISTS, .src_divs = { { .proportion_of_total = 10000 } }, }, { .name = "out-of-place", + .inplace_mode = OUT_OF_PLACE, .src_divs = { { .proportion_of_total = 10000 } }, }, { .name = "unaligned buffer, offset=1", @@ -341,6 +362,14 @@ static const struct testvec_config default_cipher_testvec_configs[] = { { .proportion_of_total = 5000 }, }, }, { + .name = "one src, two even splits dst", + .inplace_mode = OUT_OF_PLACE, + .src_divs = { { .proportion_of_total = 10000 } }, + .dst_divs = { + { .proportion_of_total = 5000 }, + { .proportion_of_total = 5000 }, + }, + }, { .name = "uneven misaligned splits, may sleep", .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP, .src_divs = { @@ -352,7 +381,7 @@ static const struct testvec_config default_cipher_testvec_configs[] = { .key_offset = 3, }, { .name = "misaligned splits crossing pages, inplace", - .inplace = true, + .inplace_mode = INPLACE_ONE_SGLIST, .src_divs = { { .proportion_of_total = 7500, @@ -384,17 +413,15 @@ static const struct testvec_config default_hash_testvec_configs[] = { .finalization_type = FINALIZATION_TYPE_FINAL, .key_offset = 1, }, { - .name = "digest buffer aligned only to alignmask", + .name = "digest misaligned buffer", .src_divs = { { .proportion_of_total = 10000, .offset = 1, - .offset_relative_to_alignmask = true, }, }, .finalization_type = FINALIZATION_TYPE_DIGEST, .key_offset = 1, - .key_offset_relative_to_alignmask = true, }, { .name = "init+update+update+final two even splits", .src_divs = { @@ -511,7 +538,8 @@ static bool valid_testvec_config(const struct testvec_config *cfg) cfg->finalization_type == FINALIZATION_TYPE_DIGEST) return false; - if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) && + if ((cfg->nosimd || cfg->nosimd_setkey || + (flags & SGDIVS_HAVE_NOSIMD)) && (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) return false; @@ -750,20 +778,41 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls, struct iov_iter input; int err; - iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len); + iov_iter_kvec(&input, ITER_SOURCE, inputs, nr_inputs, src_total_len); err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask, - cfg->inplace ? + cfg->inplace_mode != OUT_OF_PLACE ? max(dst_total_len, src_total_len) : src_total_len, &input, NULL); if (err) return err; - if (cfg->inplace) { + /* + * In-place crypto operations can use the same scatterlist for both the + * source and destination (req->src == req->dst), or can use separate + * scatterlists (req->src != req->dst) which point to the same + * underlying memory. Make sure to test both cases. + */ + if (cfg->inplace_mode == INPLACE_ONE_SGLIST) { tsgls->dst.sgl_ptr = tsgls->src.sgl; tsgls->dst.nents = tsgls->src.nents; return 0; } + if (cfg->inplace_mode == INPLACE_TWO_SGLISTS) { + /* + * For now we keep it simple and only test the case where the + * two scatterlists have identical entries, rather than + * different entries that split up the same memory differently. 
+ */ + memcpy(tsgls->dst.sgl, tsgls->src.sgl, + tsgls->src.nents * sizeof(tsgls->src.sgl[0])); + memcpy(tsgls->dst.sgl_saved, tsgls->src.sgl, + tsgls->src.nents * sizeof(tsgls->src.sgl[0])); + tsgls->dst.sgl_ptr = tsgls->dst.sgl; + tsgls->dst.nents = tsgls->src.nents; + return 0; + } + /* Out of place */ return build_test_sglist(&tsgls->dst, cfg->dst_divs[0].proportion_of_total ? cfg->dst_divs : cfg->src_divs, @@ -798,7 +847,10 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize, return 0; } -/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */ +/* + * Like setkey_f(tfm, key, ksize), but sometimes misalign the key. + * In addition, run the setkey function in no-SIMD context if requested. + */ #define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \ ({ \ const u8 *keybuf, *keyptr; \ @@ -807,69 +859,116 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize, err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \ &keybuf, &keyptr); \ if (err == 0) { \ + if ((cfg)->nosimd_setkey) \ + crypto_disable_simd_for_test(); \ err = setkey_f((tfm), keyptr, (ksize)); \ + if ((cfg)->nosimd_setkey) \ + crypto_reenable_simd_for_test(); \ kfree(keybuf); \ } \ err; \ }) -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +/* + * The fuzz tests use prandom instead of the normal Linux RNG since they don't + * need cryptographically secure random numbers. This greatly improves the + * performance of these tests, especially if they are run before the Linux RNG + * has been initialized or if they are run on a lockdep-enabled kernel. + */ + +static inline void init_rnd_state(struct rnd_state *rng) +{ + prandom_seed_state(rng, get_random_u64()); +} + +static inline u8 prandom_u8(struct rnd_state *rng) +{ + return prandom_u32_state(rng); +} + +static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil) +{ + /* + * This is slightly biased for non-power-of-2 values of 'ceil', but this + * isn't important here. 
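The wrappers defined in this hunk (and completed just below) funnel all fuzzing randomness through one seeded struct rnd_state, so a run is cheap and reproducible from a single 64-bit seed. A short sketch of the pattern; prandom_seed_state(), prandom_u32_state(), and get_random_u64() are the real <linux/prandom.h> / <linux/random.h> APIs, while the function itself is illustrative:

	static u32 example_fuzz_pick(void)
	{
		struct rnd_state rng;

		/* what init_rnd_state() does */
		prandom_seed_state(&rng, get_random_u64());

		/* what prandom_u32_below(&rng, 3) does; the modulo favours
		 * residue 0 by about 1 part in 1.4e9, which, as the comment
		 * above notes, is irrelevant for test purposes */
		return prandom_u32_state(&rng) % 3;
	}
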
+ */ + return prandom_u32_state(rng) % ceil; +} + +static inline bool prandom_bool(struct rnd_state *rng) +{ + return prandom_u32_below(rng, 2); +} + +static inline u32 prandom_u32_inclusive(struct rnd_state *rng, + u32 floor, u32 ceil) +{ + return floor + prandom_u32_below(rng, ceil - floor + 1); +} /* Generate a random length in range [0, max_len], but prefer smaller values */ -static unsigned int generate_random_length(unsigned int max_len) +static unsigned int generate_random_length(struct rnd_state *rng, + unsigned int max_len) { - unsigned int len = prandom_u32() % (max_len + 1); + unsigned int len = prandom_u32_below(rng, max_len + 1); - switch (prandom_u32() % 4) { + switch (prandom_u32_below(rng, 4)) { case 0: - return len % 64; + len %= 64; + break; case 1: - return len % 256; + len %= 256; + break; case 2: - return len % 1024; + len %= 1024; + break; default: - return len; + break; } + if (len && prandom_u32_below(rng, 4) == 0) + len = rounddown_pow_of_two(len); + return len; } /* Flip a random bit in the given nonempty data buffer */ -static void flip_random_bit(u8 *buf, size_t size) +static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size) { size_t bitpos; - bitpos = prandom_u32() % (size * 8); + bitpos = prandom_u32_below(rng, size * 8); buf[bitpos / 8] ^= 1 << (bitpos % 8); } /* Flip a random byte in the given nonempty data buffer */ -static void flip_random_byte(u8 *buf, size_t size) +static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size) { - buf[prandom_u32() % size] ^= 0xff; + buf[prandom_u32_below(rng, size)] ^= 0xff; } /* Sometimes make some random changes to the given nonempty data buffer */ -static void mutate_buffer(u8 *buf, size_t size) +static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size) { size_t num_flips; size_t i; /* Sometimes flip some bits */ - if (prandom_u32() % 4 == 0) { - num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8); + if (prandom_u32_below(rng, 4) == 0) { + num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), + size * 8); for (i = 0; i < num_flips; i++) - flip_random_bit(buf, size); + flip_random_bit(rng, buf, size); } /* Sometimes flip some bytes */ - if (prandom_u32() % 4 == 0) { - num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size); + if (prandom_u32_below(rng, 4) == 0) { + num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size); for (i = 0; i < num_flips; i++) - flip_random_byte(buf, size); + flip_random_byte(rng, buf, size); } } /* Randomly generate 'count' bytes, but sometimes make them "interesting" */ -static void generate_random_bytes(u8 *buf, size_t count) +static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count) { u8 b; u8 increment; @@ -878,11 +977,11 @@ static void generate_random_bytes(u8 *buf, size_t count) if (count == 0) return; - switch (prandom_u32() % 8) { /* Choose a generation strategy */ + switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */ case 0: case 1: /* All the same byte, plus optional mutations */ - switch (prandom_u32() % 4) { + switch (prandom_u32_below(rng, 4)) { case 0: b = 0x00; break; @@ -890,28 +989,28 @@ static void generate_random_bytes(u8 *buf, size_t count) b = 0xff; break; default: - b = (u8)prandom_u32(); + b = prandom_u8(rng); break; } memset(buf, b, count); - mutate_buffer(buf, count); + mutate_buffer(rng, buf, count); break; case 2: /* Ascending or descending bytes, plus optional mutations */ - increment = (u8)prandom_u32(); - b = (u8)prandom_u32(); + increment = prandom_u8(rng); 
+ b = prandom_u8(rng); for (i = 0; i < count; i++, b += increment) buf[i] = b; - mutate_buffer(buf, count); + mutate_buffer(rng, buf, count); break; default: /* Fully random bytes */ - for (i = 0; i < count; i++) - buf[i] = (u8)prandom_u32(); + prandom_bytes_state(rng, buf, count); } } -static char *generate_random_sgl_divisions(struct test_sg_division *divs, +static char *generate_random_sgl_divisions(struct rnd_state *rng, + struct test_sg_division *divs, size_t max_divs, char *p, char *end, bool gen_flushes, u32 req_flags) { @@ -922,24 +1021,28 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs, unsigned int this_len; const char *flushtype_str; - if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0) + if (div == &divs[max_divs - 1] || prandom_bool(rng)) this_len = remaining; + else if (prandom_u32_below(rng, 4) == 0) + this_len = (remaining + 1) / 2; else - this_len = 1 + (prandom_u32() % remaining); + this_len = prandom_u32_inclusive(rng, 1, remaining); div->proportion_of_total = this_len; - if (prandom_u32() % 4 == 0) - div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128); - else if (prandom_u32() % 2 == 0) - div->offset = prandom_u32() % 32; + if (prandom_u32_below(rng, 4) == 0) + div->offset = prandom_u32_inclusive(rng, + PAGE_SIZE - 128, + PAGE_SIZE - 1); + else if (prandom_bool(rng)) + div->offset = prandom_u32_below(rng, 32); else - div->offset = prandom_u32() % PAGE_SIZE; - if (prandom_u32() % 8 == 0) + div->offset = prandom_u32_below(rng, PAGE_SIZE); + if (prandom_u32_below(rng, 8) == 0) div->offset_relative_to_alignmask = true; div->flush_type = FLUSH_TYPE_NONE; if (gen_flushes) { - switch (prandom_u32() % 4) { + switch (prandom_u32_below(rng, 4)) { case 0: div->flush_type = FLUSH_TYPE_REIMPORT; break; @@ -951,7 +1054,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs, if (div->flush_type != FLUSH_TYPE_NONE && !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && - prandom_u32() % 2 == 0) + prandom_bool(rng)) div->nosimd = true; switch (div->flush_type) { @@ -986,7 +1089,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs, } /* Generate a random testvec_config for fuzz testing */ -static void generate_random_testvec_config(struct testvec_config *cfg, +static void generate_random_testvec_config(struct rnd_state *rng, + struct testvec_config *cfg, char *name, size_t max_namelen) { char *p = name; @@ -998,17 +1102,27 @@ static void generate_random_testvec_config(struct testvec_config *cfg, p += scnprintf(p, end - p, "random:"); - if (prandom_u32() % 2 == 0) { - cfg->inplace = true; - p += scnprintf(p, end - p, " inplace"); + switch (prandom_u32_below(rng, 4)) { + case 0: + case 1: + cfg->inplace_mode = OUT_OF_PLACE; + break; + case 2: + cfg->inplace_mode = INPLACE_ONE_SGLIST; + p += scnprintf(p, end - p, " inplace_one_sglist"); + break; + default: + cfg->inplace_mode = INPLACE_TWO_SGLISTS; + p += scnprintf(p, end - p, " inplace_two_sglists"); + break; } - if (prandom_u32() % 2 == 0) { + if (prandom_bool(rng)) { cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP; p += scnprintf(p, end - p, " may_sleep"); } - switch (prandom_u32() % 4) { + switch (prandom_u32_below(rng, 4)) { case 0: cfg->finalization_type = FINALIZATION_TYPE_FINAL; p += scnprintf(p, end - p, " use_final"); @@ -1023,36 +1137,43 @@ static void generate_random_testvec_config(struct testvec_config *cfg, break; } - if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && - prandom_u32() % 2 == 0) { - cfg->nosimd = true; - p += scnprintf(p, end - p, " 
nosimd"); + if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) { + if (prandom_bool(rng)) { + cfg->nosimd = true; + p += scnprintf(p, end - p, " nosimd"); + } + if (prandom_bool(rng)) { + cfg->nosimd_setkey = true; + p += scnprintf(p, end - p, " nosimd_setkey"); + } } p += scnprintf(p, end - p, " src_divs=["); - p = generate_random_sgl_divisions(cfg->src_divs, + p = generate_random_sgl_divisions(rng, cfg->src_divs, ARRAY_SIZE(cfg->src_divs), p, end, (cfg->finalization_type != FINALIZATION_TYPE_DIGEST), cfg->req_flags); p += scnprintf(p, end - p, "]"); - if (!cfg->inplace && prandom_u32() % 2 == 0) { + if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) { p += scnprintf(p, end - p, " dst_divs=["); - p = generate_random_sgl_divisions(cfg->dst_divs, + p = generate_random_sgl_divisions(rng, cfg->dst_divs, ARRAY_SIZE(cfg->dst_divs), p, end, false, cfg->req_flags); p += scnprintf(p, end - p, "]"); } - if (prandom_u32() % 2 == 0) { - cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); + if (prandom_bool(rng)) { + cfg->iv_offset = prandom_u32_inclusive(rng, 1, + MAX_ALGAPI_ALIGNMASK); p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset); } - if (prandom_u32() % 2 == 0) { - cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); + if (prandom_bool(rng)) { + cfg->key_offset = prandom_u32_inclusive(rng, 1, + MAX_ALGAPI_ALIGNMASK); p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset); } @@ -1061,14 +1182,18 @@ static void generate_random_testvec_config(struct testvec_config *cfg, static void crypto_disable_simd_for_test(void) { - preempt_disable(); +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL + migrate_disable(); __this_cpu_write(crypto_simd_disabled_for_test, true); +#endif } static void crypto_reenable_simd_for_test(void) { +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL __this_cpu_write(crypto_simd_disabled_for_test, false); - preempt_enable(); + migrate_enable(); +#endif } /* @@ -1112,15 +1237,6 @@ too_long: algname); return -ENAMETOOLONG; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static void crypto_disable_simd_for_test(void) -{ -} - -static void crypto_reenable_simd_for_test(void) -{ -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int build_hash_sglist(struct test_sglist *tsgl, const struct hash_testvec *vec, @@ -1133,7 +1249,7 @@ static int build_hash_sglist(struct test_sglist *tsgl, kv.iov_base = (void *)vec->plaintext; kv.iov_len = vec->psize; - iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize); + iov_iter_kvec(&input, ITER_SOURCE, &kv, 1, vec->psize); return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, &input, divs); } @@ -1177,7 +1293,6 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, u8 *hashstate) { struct crypto_shash *tfm = desc->tfm; - const unsigned int alignmask = crypto_shash_alignmask(tfm); const unsigned int digestsize = crypto_shash_digestsize(tfm); const unsigned int statesize = crypto_shash_statesize(tfm); const char *driver = crypto_shash_driver_name(tfm); @@ -1189,7 +1304,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize, - cfg, alignmask); + cfg, 0); if (err) { if (err == vec->setkey_error) return 0; @@ -1206,7 +1321,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, } /* Build the scatterlist for the source data */ - err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); + err = build_hash_sglist(tsgl, vec, cfg, 0, divs); if (err) { pr_err("alg: shash: %s: 
error preparing scatterlist for test vector %s, cfg=\"%s\"\n", driver, vec_name, cfg->name); @@ -1361,7 +1476,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, u8 *hashstate) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - const unsigned int alignmask = crypto_ahash_alignmask(tfm); const unsigned int digestsize = crypto_ahash_digestsize(tfm); const unsigned int statesize = crypto_ahash_statesize(tfm); const char *driver = crypto_ahash_driver_name(tfm); @@ -1377,7 +1491,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize, - cfg, alignmask); + cfg, 0); if (err) { if (err == vec->setkey_error) return 0; @@ -1394,7 +1508,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, } /* Build the scatterlist for the source data */ - err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); + err = build_hash_sglist(tsgl, vec, cfg, 0, divs); if (err) { pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", driver, vec_name, cfg->name); @@ -1563,13 +1677,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { + struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; + init_rnd_state(&rng); + for (i = 0; i < fuzz_iterations; i++) { - generate_random_testvec_config(&cfg, cfgname, + generate_random_testvec_config(&rng, &cfg, cfgname, sizeof(cfgname)); err = test_hash_vec_cfg(vec, vec_name, &cfg, req, desc, tsgl, hashstate); @@ -1578,24 +1694,23 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* * Generate a hash test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. */ -static void generate_random_hash_testvec(struct shash_desc *desc, +static void generate_random_hash_testvec(struct rnd_state *rng, + struct ahash_request *req, struct hash_testvec *vec, unsigned int maxkeysize, unsigned int maxdatasize, char *name, size_t max_namelen) { /* Data */ - vec->psize = generate_random_length(maxdatasize); - generate_random_bytes((u8 *)vec->plaintext, vec->psize); + vec->psize = generate_random_length(rng, maxdatasize); + generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize); /* * Key: length in range [1, maxkeysize], but usually choose maxkeysize. @@ -1605,20 +1720,21 @@ static void generate_random_hash_testvec(struct shash_desc *desc, vec->ksize = 0; if (maxkeysize) { vec->ksize = maxkeysize; - if (prandom_u32() % 4 == 0) - vec->ksize = 1 + (prandom_u32() % maxkeysize); - generate_random_bytes((u8 *)vec->key, vec->ksize); + if (prandom_u32_below(rng, 4) == 0) + vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize); + generate_random_bytes(rng, (u8 *)vec->key, vec->ksize); - vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, - vec->ksize); + vec->setkey_error = crypto_ahash_setkey( + crypto_ahash_reqtfm(req), vec->key, vec->ksize); /* If the key couldn't be set, no need to continue to digest. 
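On the consumer side of the nosimd/nosimd_setkey plumbing: crypto_disable_simd_for_test() above sets the per-CPU crypto_simd_disabled_for_test flag (migrate_disable() keeps the task on that CPU while still allowing preemption, unlike the old preempt_disable()), and SIMD-accelerated drivers observe it through crypto_simd_usable() from <crypto/internal/simd.h>. A hedged sketch of the driver-side check; simd_encrypt() and scalar_fallback() are hypothetical, and kernel_fpu_begin()/kernel_fpu_end() are the x86 names:

	static void example_encrypt(u8 *dst, const u8 *src, unsigned int len)
	{
		if (!crypto_simd_usable()) {
			/* unusable context, or testmgr set the per-CPU
			 * crypto_simd_disabled_for_test flag */
			scalar_fallback(dst, src, len);
			return;
		}

		kernel_fpu_begin();
		simd_encrypt(dst, src, len);
		kernel_fpu_end();
	}
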
*/ if (vec->setkey_error) goto done; } /* Digest */ - vec->digest_error = crypto_shash_digest(desc, vec->plaintext, - vec->psize, (u8 *)vec->digest); + vec->digest_error = crypto_hash_digest( + crypto_ahash_reqtfm(req), vec->plaintext, + vec->psize, (u8 *)vec->digest); done: snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"", vec->psize, vec->ksize); @@ -1641,9 +1757,10 @@ static int test_hash_vs_generic_impl(const char *generic_driver, const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN; const char *algname = crypto_hash_alg_common(tfm)->base.cra_name; const char *driver = crypto_ahash_driver_name(tfm); + struct rnd_state rng; char _generic_driver[CRYPTO_MAX_ALG_NAME]; - struct crypto_shash *generic_tfm = NULL; - struct shash_desc *generic_desc = NULL; + struct ahash_request *generic_req = NULL; + struct crypto_ahash *generic_tfm = NULL; unsigned int i; struct hash_testvec vec = { 0 }; char vec_name[64]; @@ -1651,9 +1768,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver, char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) + if (noslowtests) return 0; + init_rnd_state(&rng); + if (!generic_driver) { /* Use default naming convention? */ err = build_generic_driver_name(algname, _generic_driver); if (err) @@ -1664,7 +1783,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */ return 0; - generic_tfm = crypto_alloc_shash(generic_driver, 0, 0); + generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0); if (IS_ERR(generic_tfm)) { err = PTR_ERR(generic_tfm); if (err == -ENOENT) { @@ -1683,27 +1802,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver, goto out; } - generic_desc = kzalloc(sizeof(*desc) + - crypto_shash_descsize(generic_tfm), GFP_KERNEL); - if (!generic_desc) { + generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL); + if (!generic_req) { err = -ENOMEM; goto out; } - generic_desc->tfm = generic_tfm; /* Check the algorithm properties for consistency. 
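Since the generic reference implementation is now driven through the ahash API instead of shash, the relevant one-shot operation is the request-based digest. A self-contained sketch of that long-standing pattern (not code from the patch; the data must live in linearly-mapped memory for sg_init_one()):

	static int example_ahash_digest(struct crypto_ahash *tfm, const u8 *data,
					unsigned int len, u8 *out)
	{
		DECLARE_CRYPTO_WAIT(wait);
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, &sg, out, len);

		err = crypto_wait_req(crypto_ahash_digest(req), &wait);
		ahash_request_free(req);
		return err;
	}
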
*/ - if (digestsize != crypto_shash_digestsize(generic_tfm)) { + if (digestsize != crypto_ahash_digestsize(generic_tfm)) { pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n", driver, digestsize, - crypto_shash_digestsize(generic_tfm)); + crypto_ahash_digestsize(generic_tfm)); err = -EINVAL; goto out; } - if (blocksize != crypto_shash_blocksize(generic_tfm)) { + if (blocksize != crypto_ahash_blocksize(generic_tfm)) { pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n", - driver, blocksize, crypto_shash_blocksize(generic_tfm)); + driver, blocksize, crypto_ahash_blocksize(generic_tfm)); err = -EINVAL; goto out; } @@ -1722,10 +1839,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver, } for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_hash_testvec(generic_desc, &vec, + generate_random_hash_testvec(&rng, generic_req, &vec, maxkeysize, maxdatasize, vec_name, sizeof(vec_name)); - generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); + generate_random_testvec_config(&rng, cfg, cfgname, + sizeof(cfgname)); err = test_hash_vec_cfg(&vec, vec_name, cfg, req, desc, tsgl, hashstate); @@ -1739,21 +1857,10 @@ out: kfree(vec.key); kfree(vec.plaintext); kfree(vec.digest); - crypto_free_shash(generic_tfm); - kfree_sensitive(generic_desc); + ahash_request_free(generic_req); + crypto_free_ahash(generic_tfm); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_hash_vs_generic_impl(const char *generic_driver, - unsigned int maxkeysize, - struct ahash_request *req, - struct shash_desc *desc, - struct test_sglist *tsgl, - u8 *hashstate) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int alloc_shash(const char *driver, u32 type, u32 mask, struct crypto_shash **tfm_ret, @@ -1764,7 +1871,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask, tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { - if (PTR_ERR(tfm) == -ENOENT) { + if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) { /* * This algorithm is only available through the ahash * API, not the shash API, so skip the shash tests. @@ -1810,6 +1917,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs, atfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(atfm)) { + if (PTR_ERR(atfm) == -ENOENT) + return 0; pr_err("alg: hash: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(atfm)); return PTR_ERR(atfm); @@ -1854,6 +1963,9 @@ static int __alg_test_hash(const struct hash_testvec *vecs, } for (i = 0; i < num_vecs; i++) { + if (fips_enabled && vecs[i].fips_skip) + continue; + err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate); if (err) goto out; @@ -2085,7 +2197,8 @@ static int test_aead_vec_cfg(int enc, const struct aead_testvec *vec, /* Check for the correct output (ciphertext or plaintext) */ err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, enc ? 
vec->clen : vec->plen, - vec->alen, enc || !cfg->inplace); + vec->alen, + enc || cfg->inplace_mode == OUT_OF_PLACE); if (err == -EOVERFLOW) { pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n", driver, op, vec_name, cfg->name); @@ -2121,13 +2234,15 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { + struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; + init_rnd_state(&rng); + for (i = 0; i < fuzz_iterations; i++) { - generate_random_testvec_config(&cfg, cfgname, + generate_random_testvec_config(&rng, &cfg, cfgname, sizeof(cfgname)); err = test_aead_vec_cfg(enc, vec, vec_name, &cfg, req, tsgls); @@ -2136,13 +2251,11 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - -struct aead_extra_tests_ctx { +struct aead_slow_tests_ctx { + struct rnd_state rng; struct aead_request *req; struct crypto_aead *tfm; const struct alg_test_desc *test_desc; @@ -2161,24 +2274,26 @@ struct aead_extra_tests_ctx { * here means the full ciphertext including the authentication tag. The * authentication tag (and hence also the ciphertext) is assumed to be nonempty. */ -static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv, +static void mutate_aead_message(struct rnd_state *rng, + struct aead_testvec *vec, bool aad_iv, unsigned int ivsize) { const unsigned int aad_tail_size = aad_iv ? ivsize : 0; const unsigned int authsize = vec->clen - vec->plen; - if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) { + if (prandom_bool(rng) && vec->alen > aad_tail_size) { /* Mutate the AAD */ - flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size); - if (prandom_u32() % 2 == 0) + flip_random_bit(rng, (u8 *)vec->assoc, + vec->alen - aad_tail_size); + if (prandom_bool(rng)) return; } - if (prandom_u32() % 2 == 0) { + if (prandom_bool(rng)) { /* Mutate auth tag (assuming it's at the end of ciphertext) */ - flip_random_bit((u8 *)vec->ctext + vec->plen, authsize); + flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize); } else { /* Mutate any part of the ciphertext */ - flip_random_bit((u8 *)vec->ctext, vec->clen); + flip_random_bit(rng, (u8 *)vec->ctext, vec->clen); } } @@ -2189,7 +2304,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv, */ #define MIN_COLLISION_FREE_AUTHSIZE 8 -static void generate_aead_message(struct aead_request *req, +static void generate_aead_message(struct rnd_state *rng, + struct aead_request *req, const struct aead_test_suite *suite, struct aead_testvec *vec, bool prefer_inauthentic) @@ -2198,17 +2314,18 @@ static void generate_aead_message(struct aead_request *req, const unsigned int ivsize = crypto_aead_ivsize(tfm); const unsigned int authsize = vec->clen - vec->plen; const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) && - (prefer_inauthentic || prandom_u32() % 4 == 0); + (prefer_inauthentic || + prandom_u32_below(rng, 4) == 0); /* Generate the AAD. */ - generate_random_bytes((u8 *)vec->assoc, vec->alen); + generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen); if (suite->aad_iv && vec->alen >= ivsize) /* Avoid implementation-defined behavior. */ memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize); - if (inauthentic && prandom_u32() % 2 == 0) { + if (inauthentic && prandom_bool(rng)) { /* Generate a random ciphertext. 
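The point of the mutations above is to exercise the AEAD decryption contract: a (ciphertext, AAD) pair that fails verification must return -EBADMSG, with -EINVAL tolerated only where suite->einval_allowed is set. In API terms, a sketch of the assertion (not testmgr's actual code; assumes <crypto/aead.h> and a request already loaded with the mutated vector):

	static int example_expect_badmsg(struct aead_request *req)
	{
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		err = crypto_wait_req(crypto_aead_decrypt(req), &wait);

		/* for a novrfy vector, success or any other error code
		 * means the implementation failed the test */
		return err == -EBADMSG ? 0 : -EINVAL;
	}
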
*/ - generate_random_bytes((u8 *)vec->ctext, vec->clen); + generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen); } else { int i = 0; struct scatterlist src[2], dst; @@ -2220,7 +2337,7 @@ static void generate_aead_message(struct aead_request *req, if (vec->alen) sg_set_buf(&src[i++], vec->assoc, vec->alen); if (vec->plen) { - generate_random_bytes((u8 *)vec->ptext, vec->plen); + generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen); sg_set_buf(&src[i++], vec->ptext, vec->plen); } sg_init_one(&dst, vec->ctext, vec->alen + vec->clen); @@ -2240,7 +2357,7 @@ static void generate_aead_message(struct aead_request *req, * Mutate the authentic (ciphertext, AAD) pair to get an * inauthentic one. */ - mutate_aead_message(vec, suite->aad_iv, ivsize); + mutate_aead_message(rng, vec, suite->aad_iv, ivsize); } vec->novrfy = 1; if (suite->einval_allowed) @@ -2254,7 +2371,8 @@ static void generate_aead_message(struct aead_request *req, * If 'prefer_inauthentic' is true, then this function will generate inauthentic * test vectors (i.e. vectors with 'vec->novrfy=1') more often. */ -static void generate_random_aead_testvec(struct aead_request *req, +static void generate_random_aead_testvec(struct rnd_state *rng, + struct aead_request *req, struct aead_testvec *vec, const struct aead_test_suite *suite, unsigned int maxkeysize, @@ -2270,18 +2388,18 @@ static void generate_random_aead_testvec(struct aead_request *req, /* Key: length in [0, maxkeysize], but usually choose maxkeysize */ vec->klen = maxkeysize; - if (prandom_u32() % 4 == 0) - vec->klen = prandom_u32() % (maxkeysize + 1); - generate_random_bytes((u8 *)vec->key, vec->klen); + if (prandom_u32_below(rng, 4) == 0) + vec->klen = prandom_u32_below(rng, maxkeysize + 1); + generate_random_bytes(rng, (u8 *)vec->key, vec->klen); vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen); /* IV */ - generate_random_bytes((u8 *)vec->iv, ivsize); + generate_random_bytes(rng, (u8 *)vec->iv, ivsize); /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */ authsize = maxauthsize; - if (prandom_u32() % 4 == 0) - authsize = prandom_u32() % (maxauthsize + 1); + if (prandom_u32_below(rng, 4) == 0) + authsize = prandom_u32_below(rng, maxauthsize + 1); if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE) authsize = MIN_COLLISION_FREE_AUTHSIZE; if (WARN_ON(authsize > maxdatasize)) @@ -2290,11 +2408,11 @@ static void generate_random_aead_testvec(struct aead_request *req, vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize); /* AAD, plaintext, and ciphertext lengths */ - total_len = generate_random_length(maxdatasize); - if (prandom_u32() % 4 == 0) + total_len = generate_random_length(rng, maxdatasize); + if (prandom_u32_below(rng, 4) == 0) vec->alen = 0; else - vec->alen = generate_random_length(total_len); + vec->alen = generate_random_length(rng, total_len); vec->plen = total_len - vec->alen; vec->clen = vec->plen + authsize; @@ -2305,19 +2423,18 @@ static void generate_random_aead_testvec(struct aead_request *req, vec->novrfy = 0; vec->crypt_error = 0; if (vec->setkey_error == 0 && vec->setauthsize_error == 0) - generate_aead_message(req, suite, vec, prefer_inauthentic); + generate_aead_message(rng, req, suite, vec, prefer_inauthentic); snprintf(name, max_namelen, "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"", vec->alen, vec->plen, authsize, vec->klen, vec->novrfy); } -static void try_to_generate_inauthentic_testvec( - struct aead_extra_tests_ctx *ctx) +static void try_to_generate_inauthentic_testvec(struct 
aead_slow_tests_ctx *ctx) { int i; for (i = 0; i < 10; i++) { - generate_random_aead_testvec(ctx->req, &ctx->vec, + generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec, &ctx->test_desc->suite.aead, ctx->maxkeysize, ctx->maxdatasize, ctx->vec_name, @@ -2331,7 +2448,7 @@ static void try_to_generate_inauthentic_testvec( * Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the * result of an encryption with the key) and verify that decryption fails. */ -static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) +static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx) { unsigned int i; int err; @@ -2348,7 +2465,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) */ try_to_generate_inauthentic_testvec(ctx); if (ctx->vec.novrfy) { - generate_random_testvec_config(&ctx->cfg, ctx->cfgname, + generate_random_testvec_config(&ctx->rng, &ctx->cfg, + ctx->cfgname, sizeof(ctx->cfgname)); err = test_aead_vec_cfg(DECRYPT, &ctx->vec, ctx->vec_name, &ctx->cfg, @@ -2365,7 +2483,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) * Test the AEAD algorithm against the corresponding generic implementation, if * one is available. */ -static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx) +static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx) { struct crypto_aead *tfm = ctx->tfm; const char *algname = crypto_aead_alg(tfm)->base.cra_name; @@ -2438,12 +2556,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx) * the other implementation against them. */ for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_aead_testvec(generic_req, &ctx->vec, + generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec, &ctx->test_desc->suite.aead, ctx->maxkeysize, ctx->maxdatasize, ctx->vec_name, sizeof(ctx->vec_name), false); - generate_random_testvec_config(&ctx->cfg, ctx->cfgname, + generate_random_testvec_config(&ctx->rng, &ctx->cfg, + ctx->cfgname, sizeof(ctx->cfgname)); if (!ctx->vec.novrfy) { err = test_aead_vec_cfg(ENCRYPT, &ctx->vec, @@ -2468,20 +2587,21 @@ out: return err; } -static int test_aead_extra(const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) +static int test_aead_slow(const struct alg_test_desc *test_desc, + struct aead_request *req, + struct cipher_test_sglists *tsgls) { - struct aead_extra_tests_ctx *ctx; + struct aead_slow_tests_ctx *ctx; unsigned int i; int err; - if (noextratests) + if (noslowtests) return 0; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; + init_rnd_state(&ctx->rng); ctx->req = req; ctx->tfm = crypto_aead_reqtfm(req); ctx->test_desc = test_desc; @@ -2517,14 +2637,6 @@ out: kfree(ctx); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_aead_extra(const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_aead(int enc, const struct aead_test_suite *suite, struct aead_request *req, @@ -2558,6 +2670,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_aead(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: aead: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -2588,7 +2702,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char 
*driver, if (err) goto out; - err = test_aead_extra(desc, req, tsgls); + err = test_aead_slow(desc, req, tsgls); out: free_cipher_test_sglists(tsgls); aead_request_free(req); @@ -2729,18 +2843,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec, if (ivsize) { if (WARN_ON(ivsize > MAX_IVLEN)) return -EINVAL; - if (vec->generates_iv && !enc) - memcpy(iv, vec->iv_out, ivsize); - else if (vec->iv) + if (vec->iv) memcpy(iv, vec->iv, ivsize); else memset(iv, 0, ivsize); } else { - if (vec->generates_iv) { - pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n", - driver, vec_name); - return -EINVAL; - } iv = NULL; } @@ -2869,13 +2976,15 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { + struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; + init_rnd_state(&rng); + for (i = 0; i < fuzz_iterations; i++) { - generate_random_testvec_config(&cfg, cfgname, + generate_random_testvec_config(&rng, &cfg, cfgname, sizeof(cfgname)); err = test_skcipher_vec_cfg(enc, vec, vec_name, &cfg, req, tsgls); @@ -2884,16 +2993,15 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* * Generate a symmetric cipher test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. */ -static void generate_random_cipher_testvec(struct skcipher_request *req, +static void generate_random_cipher_testvec(struct rnd_state *rng, + struct skcipher_request *req, struct cipher_testvec *vec, unsigned int maxdatasize, char *name, size_t max_namelen) @@ -2907,17 +3015,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req, /* Key: length in [0, maxkeysize], but usually choose maxkeysize */ vec->klen = maxkeysize; - if (prandom_u32() % 4 == 0) - vec->klen = prandom_u32() % (maxkeysize + 1); - generate_random_bytes((u8 *)vec->key, vec->klen); + if (prandom_u32_below(rng, 4) == 0) + vec->klen = prandom_u32_below(rng, maxkeysize + 1); + generate_random_bytes(rng, (u8 *)vec->key, vec->klen); vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen); /* IV */ - generate_random_bytes((u8 *)vec->iv, ivsize); + generate_random_bytes(rng, (u8 *)vec->iv, ivsize); /* Plaintext */ - vec->len = generate_random_length(maxdatasize); - generate_random_bytes((u8 *)vec->ptext, vec->len); + vec->len = generate_random_length(rng, maxdatasize); + generate_random_bytes(rng, (u8 *)vec->ptext, vec->len); /* If the key couldn't be set, no need to continue to encrypt. */ if (vec->setkey_error) @@ -2959,6 +3067,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver, const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN; const char *algname = crypto_skcipher_alg(tfm)->base.cra_name; const char *driver = crypto_skcipher_driver_name(tfm); + struct rnd_state rng; char _generic_driver[CRYPTO_MAX_ALG_NAME]; struct crypto_skcipher *generic_tfm = NULL; struct skcipher_request *generic_req = NULL; @@ -2969,12 +3078,10 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver, char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) + if (noslowtests) return 0; - /* Keywrap isn't supported here yet as it handles its IV differently. 
*/ - if (strncmp(algname, "kw(", 3) == 0) - return 0; + init_rnd_state(&rng); if (!generic_driver) { /* Use default naming convention? */ err = build_generic_driver_name(algname, _generic_driver); @@ -3060,9 +3167,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver, } for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_cipher_testvec(generic_req, &vec, maxdatasize, + generate_random_cipher_testvec(&rng, generic_req, &vec, + maxdatasize, vec_name, sizeof(vec_name)); - generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); + generate_random_testvec_config(&rng, cfg, cfgname, + sizeof(cfgname)); err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name, cfg, req, tsgls); @@ -3085,14 +3194,6 @@ out: skcipher_request_free(generic_req); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_skcipher_vs_generic_impl(const char *generic_driver, - struct skcipher_request *req, - struct cipher_test_sglists *tsgls) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_skcipher(int enc, const struct cipher_test_suite *suite, struct skcipher_request *req, @@ -3126,6 +3227,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_skcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3164,114 +3267,8 @@ out: return err; } -static int test_comp(struct crypto_comp *tfm, - const struct comp_testvec *ctemplate, - const struct comp_testvec *dtemplate, - int ctcount, int dtcount) -{ - const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); - char *output, *decomp_output; - unsigned int i; - int ret; - - output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!output) - return -ENOMEM; - - decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!decomp_output) { - kfree(output); - return -ENOMEM; - } - - for (i = 0; i < ctcount; i++) { - int ilen; - unsigned int dlen = COMP_BUF_SIZE; - - memset(output, 0, COMP_BUF_SIZE); - memset(decomp_output, 0, COMP_BUF_SIZE); - - ilen = ctemplate[i].inlen; - ret = crypto_comp_compress(tfm, ctemplate[i].input, - ilen, output, &dlen); - if (ret) { - printk(KERN_ERR "alg: comp: compression failed " - "on test %d for %s: ret=%d\n", i + 1, algo, - -ret); - goto out; - } - - ilen = dlen; - dlen = COMP_BUF_SIZE; - ret = crypto_comp_decompress(tfm, output, - ilen, decomp_output, &dlen); - if (ret) { - pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n", - i + 1, algo, -ret); - goto out; - } - - if (dlen != ctemplate[i].inlen) { - printk(KERN_ERR "alg: comp: Compression test %d " - "failed for %s: output len = %d\n", i + 1, algo, - dlen); - ret = -EINVAL; - goto out; - } - - if (memcmp(decomp_output, ctemplate[i].input, - ctemplate[i].inlen)) { - pr_err("alg: comp: compression failed: output differs: on test %d for %s\n", - i + 1, algo); - hexdump(decomp_output, dlen); - ret = -EINVAL; - goto out; - } - } - - for (i = 0; i < dtcount; i++) { - int ilen; - unsigned int dlen = COMP_BUF_SIZE; - - memset(decomp_output, 0, COMP_BUF_SIZE); - - ilen = dtemplate[i].inlen; - ret = crypto_comp_decompress(tfm, dtemplate[i].input, - ilen, decomp_output, &dlen); - if (ret) { - printk(KERN_ERR "alg: comp: decompression failed " - "on test %d for %s: ret=%d\n", i + 1, algo, - -ret); - goto out; - } - - if (dlen != dtemplate[i].outlen) { - printk(KERN_ERR "alg: comp: Decompression test %d " 
- "failed for %s: output len = %d\n", i + 1, algo, - dlen); - ret = -EINVAL; - goto out; - } - - if (memcmp(decomp_output, dtemplate[i].output, dlen)) { - printk(KERN_ERR "alg: comp: Decompression test %d " - "failed for %s\n", i + 1, algo); - hexdump(decomp_output, dlen); - ret = -EINVAL; - goto out; - } - } - - ret = 0; - -out: - kfree(decomp_output); - kfree(output); - return ret; -} - static int test_acomp(struct crypto_acomp *tfm, - const struct comp_testvec *ctemplate, + const struct comp_testvec *ctemplate, const struct comp_testvec *dtemplate, int ctcount, int dtcount) { @@ -3439,68 +3436,6 @@ out: return ret; } -static int test_cprng(struct crypto_rng *tfm, - const struct cprng_testvec *template, - unsigned int tcount) -{ - const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); - int err = 0, i, j, seedsize; - u8 *seed; - char result[32]; - - seedsize = crypto_rng_seedsize(tfm); - - seed = kmalloc(seedsize, GFP_KERNEL); - if (!seed) { - printk(KERN_ERR "alg: cprng: Failed to allocate seed space " - "for %s\n", algo); - return -ENOMEM; - } - - for (i = 0; i < tcount; i++) { - memset(result, 0, 32); - - memcpy(seed, template[i].v, template[i].vlen); - memcpy(seed + template[i].vlen, template[i].key, - template[i].klen); - memcpy(seed + template[i].vlen + template[i].klen, - template[i].dt, template[i].dtlen); - - err = crypto_rng_reset(tfm, seed, seedsize); - if (err) { - printk(KERN_ERR "alg: cprng: Failed to reset rng " - "for %s\n", algo); - goto out; - } - - for (j = 0; j < template[i].loops; j++) { - err = crypto_rng_get_bytes(tfm, result, - template[i].rlen); - if (err < 0) { - printk(KERN_ERR "alg: cprng: Failed to obtain " - "the correct amount of random data for " - "%s (requested %d)\n", algo, - template[i].rlen); - goto out; - } - } - - err = memcmp(result, template[i].result, - template[i].rlen); - if (err) { - printk(KERN_ERR "alg: cprng: Test %d failed for %s\n", - i, algo); - hexdump(result, template[i].rlen); - err = -EINVAL; - goto out; - } - } - -out: - kfree(seed); - return err; -} - static int alg_test_cipher(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -3510,6 +3445,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc, tfm = crypto_alloc_cipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3526,115 +3463,25 @@ static int alg_test_cipher(const struct alg_test_desc *desc, static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { - struct crypto_comp *comp; struct crypto_acomp *acomp; int err; - u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; - - if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { - acomp = crypto_alloc_acomp(driver, type, mask); - if (IS_ERR(acomp)) { - pr_err("alg: acomp: Failed to load transform for %s: %ld\n", - driver, PTR_ERR(acomp)); - return PTR_ERR(acomp); - } - err = test_acomp(acomp, desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); - crypto_free_acomp(acomp); - } else { - comp = crypto_alloc_comp(driver, type, mask); - if (IS_ERR(comp)) { - pr_err("alg: comp: Failed to load transform for %s: %ld\n", - driver, PTR_ERR(comp)); - return PTR_ERR(comp); - } - - err = test_comp(comp, desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); - - 
crypto_free_comp(comp); - } - return err; -} - -static int alg_test_crc32c(const struct alg_test_desc *desc, - const char *driver, u32 type, u32 mask) -{ - struct crypto_shash *tfm; - __le32 val; - int err; - - err = alg_test_hash(desc, driver, type, mask); - if (err) - return err; - tfm = crypto_alloc_shash(driver, type, mask); - if (IS_ERR(tfm)) { - if (PTR_ERR(tfm) == -ENOENT) { - /* - * This crc32c implementation is only available through - * ahash API, not the shash API, so the remaining part - * of the test is not applicable to it. - */ + acomp = crypto_alloc_acomp(driver, type, mask); + if (IS_ERR(acomp)) { + if (PTR_ERR(acomp) == -ENOENT) return 0; - } - printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - driver = crypto_shash_driver_name(tfm); - - do { - SHASH_DESC_ON_STACK(shash, tfm); - u32 *ctx = (u32 *)shash_desc_ctx(shash); - - shash->tfm = tfm; - - *ctx = 420553207; - err = crypto_shash_final(shash, (u8 *)&val); - if (err) { - printk(KERN_ERR "alg: crc32c: Operation failed for " - "%s: %d\n", driver, err); - break; - } - - if (val != cpu_to_le32(~420553207)) { - pr_err("alg: crc32c: Test failed for %s: %u\n", - driver, le32_to_cpu(val)); - err = -EINVAL; - } - } while (0); - - crypto_free_shash(tfm); - + pr_err("alg: acomp: Failed to load transform for %s: %ld\n", + driver, PTR_ERR(acomp)); + return PTR_ERR(acomp); + } + err = test_acomp(acomp, desc->suite.comp.comp.vecs, + desc->suite.comp.decomp.vecs, + desc->suite.comp.comp.count, + desc->suite.comp.decomp.count); + crypto_free_acomp(acomp); return err; } -static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, - u32 type, u32 mask) -{ - struct crypto_rng *rng; - int err; - - rng = crypto_alloc_rng(driver, type, mask); - if (IS_ERR(rng)) { - printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(rng)); - return PTR_ERR(rng); - } - - err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count); - - crypto_free_rng(rng); - - return err; -} - - static int drbg_cavs_test(const struct drbg_testvec *test, int pr, const char *driver, u32 type, u32 mask) { @@ -3649,10 +3496,12 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, drng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(drng)) { + kfree_sensitive(buf); + if (PTR_ERR(drng) == -ENOENT) + return 0; printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); - kfree_sensitive(buf); - return -ENOMEM; + return PTR_ERR(drng); } test_data.testentropy = &testentropy; @@ -3894,6 +3743,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_kpp(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3922,11 +3773,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, struct crypto_wait wait; unsigned int out_len_max, out_len = 0; int err = -ENOMEM; - struct scatterlist src, dst, src_tab[3]; - const char *m, *c; - unsigned int m_size, c_size; - const char *op; - u8 *key, *ptr; + struct scatterlist src, dst, src_tab[2]; + const char *c; + unsigned int c_size; if (testmgr_alloc_buf(xbuf)) return err; @@ -3937,92 +3786,53 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, crypto_init_wait(&wait); - key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len, - GFP_KERNEL); - if (!key) - goto free_req; - 
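/*
 * Aside (not part of the patch): the key buffer assembled in the removed
 * lines here, and rebuilt in test_sig_one() further down, has the layout
 * that ecrdsa expects when curve parameters accompany the key:
 *
 *      key[key_len] | u32 algo | u32 param_len | params[param_len]
 *
 * test_pack_u32() is assumed to be the obvious cursor-advancing store,
 * along the lines of:
 */
static inline u8 *test_pack_u32(u8 *dst, u32 val)
{
        memcpy(dst, &val, sizeof(val));
        return dst + sizeof(val);
}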
memcpy(key, vecs->key, vecs->key_len); - ptr = key + vecs->key_len; - ptr = test_pack_u32(ptr, vecs->algo); - ptr = test_pack_u32(ptr, vecs->param_len); - memcpy(ptr, vecs->params, vecs->param_len); - if (vecs->public_key_vec) - err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len); + err = crypto_akcipher_set_pub_key(tfm, vecs->key, + vecs->key_len); else - err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len); + err = crypto_akcipher_set_priv_key(tfm, vecs->key, + vecs->key_len); if (err) - goto free_key; + goto free_req; - /* - * First run test which do not require a private key, such as - * encrypt or verify. - */ + /* First run encrypt test which does not require a private key */ err = -ENOMEM; out_len_max = crypto_akcipher_maxsize(tfm); outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); if (!outbuf_enc) - goto free_key; - - if (!vecs->siggen_sigver_test) { - m = vecs->m; - m_size = vecs->m_size; - c = vecs->c; - c_size = vecs->c_size; - op = "encrypt"; - } else { - /* Swap args so we could keep plaintext (digest) - * in vecs->m, and cooked signature in vecs->c. - */ - m = vecs->c; /* signature */ - m_size = vecs->c_size; - c = vecs->m; /* digest */ - c_size = vecs->m_size; - op = "verify"; - } + goto free_req; + + c = vecs->c; + c_size = vecs->c_size; err = -E2BIG; - if (WARN_ON(m_size > PAGE_SIZE)) + if (WARN_ON(vecs->m_size > PAGE_SIZE)) goto free_all; - memcpy(xbuf[0], m, m_size); + memcpy(xbuf[0], vecs->m, vecs->m_size); - sg_init_table(src_tab, 3); + sg_init_table(src_tab, 2); sg_set_buf(&src_tab[0], xbuf[0], 8); - sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8); - if (vecs->siggen_sigver_test) { - if (WARN_ON(c_size > PAGE_SIZE)) - goto free_all; - memcpy(xbuf[1], c, c_size); - sg_set_buf(&src_tab[2], xbuf[1], c_size); - akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size); - } else { - sg_init_one(&dst, outbuf_enc, out_len_max); - akcipher_request_set_crypt(req, src_tab, &dst, m_size, - out_len_max); - } + sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8); + sg_init_one(&dst, outbuf_enc, out_len_max); + akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, + out_len_max); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); - err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature verification */ - crypto_akcipher_verify(req) : - /* Run asymmetric encrypt */ - crypto_akcipher_encrypt(req), &wait); + err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait); if (err) { - pr_err("alg: akcipher: %s test failed. err %d\n", op, err); + pr_err("alg: akcipher: encrypt test failed. err %d\n", err); goto free_all; } - if (!vecs->siggen_sigver_test && c) { + if (c) { if (req->dst_len != c_size) { - pr_err("alg: akcipher: %s test failed. Invalid output len\n", - op); + pr_err("alg: akcipher: encrypt test failed. Invalid output len\n"); err = -EINVAL; goto free_all; } /* verify that encrypted message is equal to expected */ if (memcmp(c, outbuf_enc, c_size) != 0) { - pr_err("alg: akcipher: %s test failed. Invalid output\n", - op); + pr_err("alg: akcipher: encrypt test failed. Invalid output\n"); hexdump(outbuf_enc, c_size); err = -EINVAL; goto free_all; @@ -4030,7 +3840,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, } /* - * Don't invoke (decrypt or sign) test which require a private key + * Don't invoke decrypt test which requires a private key * for vectors with only a public key. 
*/ if (vecs->public_key_vec) { @@ -4043,13 +3853,12 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, goto free_all; } - if (!vecs->siggen_sigver_test && !c) { + if (!c) { c = outbuf_enc; c_size = req->dst_len; } err = -E2BIG; - op = vecs->siggen_sigver_test ? "sign" : "decrypt"; if (WARN_ON(c_size > PAGE_SIZE)) goto free_all; memcpy(xbuf[0], c, c_size); @@ -4059,34 +3868,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, crypto_init_wait(&wait); akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max); - err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature generation */ - crypto_akcipher_sign(req) : - /* Run asymmetric decrypt */ - crypto_akcipher_decrypt(req), &wait); + err = crypto_wait_req(crypto_akcipher_decrypt(req), &wait); if (err) { - pr_err("alg: akcipher: %s test failed. err %d\n", op, err); + pr_err("alg: akcipher: decrypt test failed. err %d\n", err); goto free_all; } out_len = req->dst_len; - if (out_len < m_size) { - pr_err("alg: akcipher: %s test failed. Invalid output len %u\n", - op, out_len); + if (out_len < vecs->m_size) { + pr_err("alg: akcipher: decrypt test failed. Invalid output len %u\n", + out_len); err = -EINVAL; goto free_all; } /* verify that decrypted message is equal to the original msg */ - if (memchr_inv(outbuf_dec, 0, out_len - m_size) || - memcmp(m, outbuf_dec + out_len - m_size, m_size)) { - pr_err("alg: akcipher: %s test failed. Invalid output\n", op); + if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) || + memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size, + vecs->m_size)) { + pr_err("alg: akcipher: decrypt test failed. Invalid output\n"); hexdump(outbuf_dec, out_len); err = -EINVAL; } free_all: kfree(outbuf_dec); kfree(outbuf_enc); -free_key: - kfree(key); free_req: akcipher_request_free(req); free_xbuf: @@ -4122,6 +3926,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_akcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -4134,6 +3940,114 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, return err; } +static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs) +{ + u8 *ptr, *key __free(kfree); + int err, sig_size; + + key = kmalloc(vecs->key_len + 2 * sizeof(u32) + vecs->param_len, + GFP_KERNEL); + if (!key) + return -ENOMEM; + + /* ecrdsa expects additional parameters appended to the key */ + memcpy(key, vecs->key, vecs->key_len); + ptr = key + vecs->key_len; + ptr = test_pack_u32(ptr, vecs->algo); + ptr = test_pack_u32(ptr, vecs->param_len); + memcpy(ptr, vecs->params, vecs->param_len); + + if (vecs->public_key_vec) + err = crypto_sig_set_pubkey(tfm, key, vecs->key_len); + else + err = crypto_sig_set_privkey(tfm, key, vecs->key_len); + if (err) + return err; + + /* + * Run asymmetric signature verification first + * (which does not require a private key) + */ + err = crypto_sig_verify(tfm, vecs->c, vecs->c_size, + vecs->m, vecs->m_size); + if (err) { + pr_err("alg: sig: verify test failed: err %d\n", err); + return err; + } + + /* + * Don't invoke sign test (which requires a private key) + * for vectors with only a public key. 
+ */ + if (vecs->public_key_vec) + return 0; + + sig_size = crypto_sig_maxsize(tfm); + if (sig_size < vecs->c_size) { + pr_err("alg: sig: invalid maxsize %u\n", sig_size); + return -EINVAL; + } + + u8 *sig __free(kfree) = kzalloc(sig_size, GFP_KERNEL); + if (!sig) + return -ENOMEM; + + /* Run asymmetric signature generation */ + err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size); + if (err < 0) { + pr_err("alg: sig: sign test failed: err %d\n", err); + return err; + } + + /* Verify that generated signature equals cooked signature */ + if (err != vecs->c_size || + memcmp(sig, vecs->c, vecs->c_size) || + memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) { + pr_err("alg: sig: sign test failed: invalid output\n"); + hexdump(sig, sig_size); + return -EINVAL; + } + + return 0; +} + +static int test_sig(struct crypto_sig *tfm, const char *alg, + const struct sig_testvec *vecs, unsigned int tcount) +{ + const char *algo = crypto_tfm_alg_driver_name(crypto_sig_tfm(tfm)); + int ret, i; + + for (i = 0; i < tcount; i++) { + ret = test_sig_one(tfm, vecs++); + if (ret) { + pr_err("alg: sig: test %d failed for %s: err %d\n", + i + 1, algo, ret); + return ret; + } + } + return 0; +} + +static int alg_test_sig(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + struct crypto_sig *tfm; + int err = 0; + + tfm = crypto_alloc_sig(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: sig: Failed to load tfm for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + if (desc->suite.sig.vecs) + err = test_sig(tfm, desc->alg, desc->suite.sig.vecs, + desc->suite.sig.count); + + crypto_free_sig(tfm); + return err; +} + static int alg_test_null(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -4147,14 +4061,14 @@ static int alg_test_null(const struct alg_test_desc *desc, static const struct alg_test_desc alg_test_descs[] = { { .alg = "adiantum(xchacha12,aes)", - .generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)", + .generic_driver = "adiantum(xchacha12-lib,aes-generic,nhpoly1305-generic)", .test = alg_test_skcipher, .suite = { .cipher = __VECS(adiantum_xchacha12_aes_tv_template) }, }, { .alg = "adiantum(xchacha20,aes)", - .generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)", + .generic_driver = "adiantum(xchacha20-lib,aes-generic,nhpoly1305-generic)", .test = alg_test_skcipher, .suite = { .cipher = __VECS(adiantum_xchacha20_aes_tv_template) @@ -4166,19 +4080,15 @@ static const struct alg_test_desc alg_test_descs[] = { .aead = __VECS(aegis128_tv_template) } }, { - .alg = "ansi_cprng", - .test = alg_test_cprng, - .suite = { - .cprng = __VECS(ansi_cprng_aes_tv_template) - } - }, { .alg = "authenc(hmac(md5),ecb(cipher_null))", + .generic_driver = "authenc(hmac-md5-lib,ecb-cipher_null)", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_md5_ecb_cipher_null_tv_template) } }, { .alg = "authenc(hmac(sha1),cbc(aes))", + .generic_driver = "authenc(hmac-sha1-lib,cbc(aes-generic))", .test = alg_test_aead, .fips_allowed = 1, .suite = { @@ -4186,14 +4096,15 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha1),cbc(des))", + .generic_driver = "authenc(hmac-sha1-lib,cbc(des-generic))", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_sha1_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),cbc(des3_ede))", + .generic_driver = "authenc(hmac-sha1-lib,cbc(des3_ede-generic))", .test = alg_test_aead, - .fips_allowed = 1, .suite = { .aead = 
__VECS(hmac_sha1_des3_ede_cbc_tv_temp) } @@ -4203,6 +4114,7 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, }, { .alg = "authenc(hmac(sha1),ecb(cipher_null))", + .generic_driver = "authenc(hmac-sha1-lib,ecb-cipher_null)", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp) @@ -4213,19 +4125,21 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, }, { .alg = "authenc(hmac(sha224),cbc(des))", + .generic_driver = "authenc(hmac-sha224-lib,cbc(des-generic))", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_sha224_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha224),cbc(des3_ede))", + .generic_driver = "authenc(hmac-sha224-lib,cbc(des3_ede-generic))", .test = alg_test_aead, - .fips_allowed = 1, .suite = { .aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(aes))", + .generic_driver = "authenc(hmac-sha256-lib,cbc(aes-generic))", .test = alg_test_aead, .fips_allowed = 1, .suite = { @@ -4233,14 +4147,15 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha256),cbc(des))", + .generic_driver = "authenc(hmac-sha256-lib,cbc(des-generic))", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_sha256_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(des3_ede))", + .generic_driver = "authenc(hmac-sha256-lib,cbc(des3_ede-generic))", .test = alg_test_aead, - .fips_allowed = 1, .suite = { .aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp) } @@ -4249,19 +4164,27 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { + .alg = "authenc(hmac(sha256),cts(cbc(aes)))", + .generic_driver = "authenc(hmac-sha256-lib,cts(cbc(aes-generic)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128) + } + }, { .alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))", .test = alg_test_null, .fips_allowed = 1, }, { .alg = "authenc(hmac(sha384),cbc(des))", + .generic_driver = "authenc(hmac-sha384-lib,cbc(des-generic))", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_sha384_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha384),cbc(des3_ede))", + .generic_driver = "authenc(hmac-sha384-lib,cbc(des3_ede-generic))", .test = alg_test_aead, - .fips_allowed = 1, .suite = { .aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp) } @@ -4270,11 +4193,19 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { + .alg = "authenc(hmac(sha384),cts(cbc(aes)))", + .generic_driver = "authenc(hmac-sha384-lib,cts(cbc(aes-generic)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192) + } + }, { .alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))", .test = alg_test_null, .fips_allowed = 1, }, { .alg = "authenc(hmac(sha512),cbc(aes))", + .generic_driver = "authenc(hmac-sha512-lib,cbc(aes-generic))", .fips_allowed = 1, .test = alg_test_aead, .suite = { @@ -4282,14 +4213,15 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "authenc(hmac(sha512),cbc(des))", + .generic_driver = "authenc(hmac-sha512-lib,cbc(des-generic))", .test = alg_test_aead, .suite = { .aead = __VECS(hmac_sha512_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),cbc(des3_ede))", + .generic_driver = "authenc(hmac-sha512-lib,cbc(des3_ede-generic))", .test = alg_test_aead, - .fips_allowed = 1, .suite = { .aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp) } @@ -4303,6 +4235,7 @@ static const struct alg_test_desc alg_test_descs[] = { 
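/*
 * Aside (not part of the patch): test_sig_one() above relies on the
 * scope-based cleanup helpers from <linux/cleanup.h>. A variable declared
 * with __free(kfree) has kfree() attached as a destructor, so every return
 * path frees it automatically and the old free_key:-style labels go away.
 * Minimal sketch:
 */
#include <linux/cleanup.h>
#include <linux/slab.h>

static int scoped_alloc_demo(size_t len)
{
        u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        buf[0] = 0x42;          /* use the buffer... */
        return 0;               /* ...no explicit kfree() on any path */
}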
.fips_allowed = 1, }, { .alg = "blake2b-160", + .generic_driver = "blake2b-160-lib", .test = alg_test_hash, .fips_allowed = 0, .suite = { @@ -4310,6 +4243,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "blake2b-256", + .generic_driver = "blake2b-256-lib", .test = alg_test_hash, .fips_allowed = 0, .suite = { @@ -4317,6 +4251,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "blake2b-384", + .generic_driver = "blake2b-384-lib", .test = alg_test_hash, .fips_allowed = 0, .suite = { @@ -4324,36 +4259,13 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "blake2b-512", + .generic_driver = "blake2b-512-lib", .test = alg_test_hash, .fips_allowed = 0, .suite = { .hash = __VECS(blake2b_512_tv_template) } }, { - .alg = "blake2s-128", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_128_tv_template) - } - }, { - .alg = "blake2s-160", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_160_tv_template) - } - }, { - .alg = "blake2s-224", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_224_tv_template) - } - }, { - .alg = "blake2s-256", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_256_tv_template) - } - }, { .alg = "cbc(aes)", .test = alg_test_skcipher, .fips_allowed = 1, @@ -4367,6 +4279,12 @@ static const struct alg_test_desc alg_test_descs[] = { .cipher = __VECS(anubis_cbc_tv_template) }, }, { + .alg = "cbc(aria)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aria_cbc_tv_template) + }, + }, { .alg = "cbc(blowfish)", .test = alg_test_skcipher, .suite = { @@ -4399,7 +4317,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "cbc(des3_ede)", .test = alg_test_skcipher, - .fips_allowed = 1, .suite = { .cipher = __VECS(des3_ede_cbc_tv_template) }, @@ -4445,12 +4362,17 @@ static const struct alg_test_desc alg_test_descs[] = { }, { #endif .alg = "cbcmac(aes)", - .fips_allowed = 1, .test = alg_test_hash, .suite = { .hash = __VECS(aes_cbcmac_tv_template) } }, { + .alg = "cbcmac(sm4)", + .test = alg_test_hash, + .suite = { + .hash = __VECS(sm4_cbcmac_tv_template) + } + }, { .alg = "ccm(aes)", .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))", .test = alg_test_aead, @@ -4462,20 +4384,18 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { - .alg = "cfb(aes)", - .test = alg_test_skcipher, - .fips_allowed = 1, - .suite = { - .cipher = __VECS(aes_cfb_tv_template) - }, - }, { - .alg = "cfb(sm4)", - .test = alg_test_skcipher, + .alg = "ccm(sm4)", + .generic_driver = "ccm_base(ctr(sm4-generic),cbcmac(sm4-generic))", + .test = alg_test_aead, .suite = { - .cipher = __VECS(sm4_cfb_tv_template) + .aead = { + ____VECS(sm4_ccm_tv_template), + .einval_allowed = 1, + } } }, { .alg = "chacha20", + .generic_driver = "chacha20-lib", .test = alg_test_skcipher, .suite = { .cipher = __VECS(chacha20_tv_template) @@ -4488,17 +4408,26 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(aes_cmac128_tv_template) } }, { + .alg = "cmac(camellia)", + .test = alg_test_hash, + .suite = { + .hash = __VECS(camellia_cmac128_tv_template) + } + }, { .alg = "cmac(des3_ede)", - .fips_allowed = 1, .test = alg_test_hash, .suite = { .hash = __VECS(des3_ede_cmac64_tv_template) } }, { - .alg = "compress_null", - .test = alg_test_null, + .alg = "cmac(sm4)", + .test = alg_test_hash, + .suite = { + .hash = __VECS(sm4_cmac128_tv_template) + } }, { .alg = "crc32", + .generic_driver = "crc32-lib", .test = alg_test_hash, .fips_allowed = 1, 
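/*
 * Aside (reconstructed from context, not part of the patch): the explicit
 * .generic_driver strings being added throughout this table are needed
 * because the "*-lib" reference implementations do not follow the default
 * naming rule applied when the field is absent, which appends "-generic"
 * to every base algorithm name, roughly:
 */
static void generic_name_sketch(const char *alg, char *out)
{
        while (*alg) {
                size_t len = strcspn(alg, "(),");   /* next name component */

                memcpy(out, alg, len);
                out += len;
                alg += len;
                if (len && *alg != '(') {           /* base name, not a template */
                        memcpy(out, "-generic", 8);
                        out += 8;
                }
                if (*alg)                           /* copy '(', ')' or ',' */
                        *out++ = *alg++;
        }
        *out = '\0';    /* "cbc(aes)" -> "cbc(aes-generic)"; no bounds checks here */
}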
.suite = { @@ -4506,24 +4435,24 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "crc32c", - .test = alg_test_crc32c, + .generic_driver = "crc32c-lib", + .test = alg_test_hash, .fips_allowed = 1, .suite = { .hash = __VECS(crc32c_tv_template) } }, { - .alg = "crct10dif", - .test = alg_test_hash, + .alg = "ctr(aes)", + .test = alg_test_skcipher, .fips_allowed = 1, .suite = { - .hash = __VECS(crct10dif_tv_template) + .cipher = __VECS(aes_ctr_tv_template) } }, { - .alg = "ctr(aes)", + .alg = "ctr(aria)", .test = alg_test_skcipher, - .fips_allowed = 1, .suite = { - .cipher = __VECS(aes_ctr_tv_template) + .cipher = __VECS(aria_ctr_tv_template) } }, { .alg = "ctr(blowfish)", @@ -4558,7 +4487,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "ctr(des3_ede)", .test = alg_test_skcipher, - .fips_allowed = 1, .suite = { .cipher = __VECS(des3_ede_ctr_tv_template) } @@ -4618,10 +4546,10 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "curve25519", - .test = alg_test_kpp, + .alg = "cts(cbc(sm4))", + .test = alg_test_skcipher, .suite = { - .kpp = __VECS(curve25519_tv_template) + .cipher = __VECS(sm4_cts_tv_template) } }, { .alg = "deflate", @@ -4634,9 +4562,18 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "deflate-iaa", + .test = alg_test_comp, + .fips_allowed = 1, + .suite = { + .comp = { + .comp = __VECS(deflate_comp_tv_template), + .decomp = __VECS(deflate_decomp_tv_template) + } + } + }, { .alg = "dh", .test = alg_test_kpp, - .fips_allowed = 1, .suite = { .kpp = __VECS(dh_tv_template) } @@ -4665,14 +4602,6 @@ static const struct alg_test_desc alg_test_descs[] = { .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template) } }, { - /* - * There is no need to specifically test the DRBG with every - * backend cipher -- covered by drbg_nopr_hmac_sha256 test - */ - .alg = "drbg_nopr_hmac_sha1", - .fips_allowed = 1, - .test = alg_test_null, - }, { .alg = "drbg_nopr_hmac_sha256", .test = alg_test_drbg, .fips_allowed = 1, @@ -4680,10 +4609,13 @@ static const struct alg_test_desc alg_test_descs[] = { .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template) } }, { - /* covered by drbg_nopr_hmac_sha256 test */ + /* + * There is no need to specifically test the DRBG with every + * backend cipher -- covered by drbg_nopr_hmac_sha512 test + */ .alg = "drbg_nopr_hmac_sha384", - .fips_allowed = 1, .test = alg_test_null, + .fips_allowed = 1 }, { .alg = "drbg_nopr_hmac_sha512", .test = alg_test_drbg, @@ -4692,10 +4624,6 @@ static const struct alg_test_desc alg_test_descs[] = { .drbg = __VECS(drbg_nopr_hmac_sha512_tv_template) } }, { - .alg = "drbg_nopr_sha1", - .fips_allowed = 1, - .test = alg_test_null, - }, { .alg = "drbg_nopr_sha256", .test = alg_test_drbg, .fips_allowed = 1, @@ -4705,8 +4633,8 @@ static const struct alg_test_desc alg_test_descs[] = { }, { /* covered by drbg_nopr_sha256 test */ .alg = "drbg_nopr_sha384", - .fips_allowed = 1, .test = alg_test_null, + .fips_allowed = 1 }, { .alg = "drbg_nopr_sha512", .fips_allowed = 1, @@ -4728,10 +4656,6 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_null, }, { - .alg = "drbg_pr_hmac_sha1", - .fips_allowed = 1, - .test = alg_test_null, - }, { .alg = "drbg_pr_hmac_sha256", .test = alg_test_drbg, .fips_allowed = 1, @@ -4741,17 +4665,13 @@ static const struct alg_test_desc alg_test_descs[] = { }, { /* covered by drbg_pr_hmac_sha256 test */ .alg = "drbg_pr_hmac_sha384", - .fips_allowed = 1, .test = 
alg_test_null, + .fips_allowed = 1 }, { .alg = "drbg_pr_hmac_sha512", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "drbg_pr_sha1", - .fips_allowed = 1, - .test = alg_test_null, - }, { .alg = "drbg_pr_sha256", .test = alg_test_drbg, .fips_allowed = 1, @@ -4761,8 +4681,8 @@ static const struct alg_test_desc alg_test_descs[] = { }, { /* covered by drbg_pr_sha256 test */ .alg = "drbg_pr_sha384", - .fips_allowed = 1, .test = alg_test_null, + .fips_allowed = 1 }, { .alg = "drbg_pr_sha512", .fips_allowed = 1, @@ -4782,12 +4702,18 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "ecb(arc4)", - .generic_driver = "ecb(arc4)-generic", + .generic_driver = "arc4-generic", .test = alg_test_skcipher, .suite = { .cipher = __VECS(arc4_tv_template) } }, { + .alg = "ecb(aria)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aria_tv_template) + } + }, { .alg = "ecb(blowfish)", .test = alg_test_skcipher, .suite = { @@ -4824,7 +4750,6 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "ecb(des3_ede)", .test = alg_test_skcipher, - .fips_allowed = 1, .suite = { .cipher = __VECS(des3_ede_tv_template) } @@ -4923,30 +4848,40 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "ecdsa-nist-p192", - .test = alg_test_akcipher, + .test = alg_test_sig, .suite = { - .akcipher = __VECS(ecdsa_nist_p192_tv_template) + .sig = __VECS(ecdsa_nist_p192_tv_template) } }, { .alg = "ecdsa-nist-p256", - .test = alg_test_akcipher, + .test = alg_test_sig, + .fips_allowed = 1, .suite = { - .akcipher = __VECS(ecdsa_nist_p256_tv_template) + .sig = __VECS(ecdsa_nist_p256_tv_template) } }, { .alg = "ecdsa-nist-p384", - .test = alg_test_akcipher, + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(ecdsa_nist_p384_tv_template) + } + }, { + .alg = "ecdsa-nist-p521", + .test = alg_test_sig, + .fips_allowed = 1, .suite = { - .akcipher = __VECS(ecdsa_nist_p384_tv_template) + .sig = __VECS(ecdsa_nist_p521_tv_template) } }, { .alg = "ecrdsa", - .test = alg_test_akcipher, + .test = alg_test_sig, .suite = { - .akcipher = __VECS(ecrdsa_tv_template) + .sig = __VECS(ecrdsa_tv_template) } }, { .alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)", + .generic_driver = "essiv(authenc(hmac-sha256-lib,cbc(aes-generic)),sha256-lib)", .test = alg_test_aead, .fips_allowed = 1, .suite = { @@ -4954,12 +4889,50 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "essiv(cbc(aes),sha256)", + .generic_driver = "essiv(cbc(aes-generic),sha256-lib)", .test = alg_test_skcipher, .fips_allowed = 1, .suite = { .cipher = __VECS(essiv_aes_cbc_tv_template) } }, { +#if IS_ENABLED(CONFIG_CRYPTO_DH_RFC7919_GROUPS) + .alg = "ffdhe2048(dh)", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ffdhe2048_dh_tv_template) + } + }, { + .alg = "ffdhe3072(dh)", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ffdhe3072_dh_tv_template) + } + }, { + .alg = "ffdhe4096(dh)", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ffdhe4096_dh_tv_template) + } + }, { + .alg = "ffdhe6144(dh)", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ffdhe6144_dh_tv_template) + } + }, { + .alg = "ffdhe8192(dh)", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ffdhe8192_dh_tv_template) + } + }, { +#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */ .alg = "gcm(aes)", .generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)", .test = alg_test_aead, @@ -4968,14 +4941,35 
@@ static const struct alg_test_desc alg_test_descs[] = { .aead = __VECS(aes_gcm_tv_template) } }, { + .alg = "gcm(aria)", + .generic_driver = "gcm_base(ctr(aria-generic),ghash-generic)", + .test = alg_test_aead, + .suite = { + .aead = __VECS(aria_gcm_tv_template) + } + }, { + .alg = "gcm(sm4)", + .generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)", + .test = alg_test_aead, + .suite = { + .aead = __VECS(sm4_gcm_tv_template) + } + }, { .alg = "ghash", .test = alg_test_hash, - .fips_allowed = 1, .suite = { .hash = __VECS(ghash_tv_template) } }, { + .alg = "hctr2(aes)", + .generic_driver = "hctr2_base(xctr(aes-generic),polyval-lib)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aes_hctr2_tv_template) + } + }, { .alg = "hmac(md5)", + .generic_driver = "hmac-md5-lib", .test = alg_test_hash, .suite = { .hash = __VECS(hmac_md5_tv_template) @@ -4988,6 +4982,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha1)", + .generic_driver = "hmac-sha1-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -4995,6 +4990,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha224)", + .generic_driver = "hmac-sha224-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5002,6 +4998,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha256)", + .generic_driver = "hmac-sha256-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5009,6 +5006,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha3-224)", + .generic_driver = "hmac(sha3-224-lib)", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5016,6 +5014,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha3-256)", + .generic_driver = "hmac(sha3-256-lib)", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5023,6 +5022,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha3-384)", + .generic_driver = "hmac(sha3-384-lib)", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5030,6 +5030,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha3-512)", + .generic_driver = "hmac(sha3-512-lib)", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5037,6 +5038,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha384)", + .generic_driver = "hmac-sha384-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5044,6 +5046,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "hmac(sha512)", + .generic_driver = "hmac-sha512-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5072,12 +5075,9 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_null, }, { - .alg = "kw(aes)", - .test = alg_test_skcipher, - .fips_allowed = 1, - .suite = { - .cipher = __VECS(aes_kw_tv_template) - } + .alg = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .test = alg_test_aead, + .suite.aead = __VECS(krb5_test_camellia_cts_cmac) }, { .alg = "lrw(aes)", .generic_driver = "lrw(ecb(aes-generic))", @@ -5161,6 +5161,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "md5", + .generic_driver = "md5-lib", .test = alg_test_hash, .suite = { .hash = __VECS(md5_tv_template) @@ -5178,25 +5179,23 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(nhpoly1305_tv_template) } }, { - .alg = "ofb(aes)", - .test = alg_test_skcipher, + .alg = "p1363(ecdsa-nist-p192)", + .test = 
alg_test_null, + }, { + .alg = "p1363(ecdsa-nist-p256)", + .test = alg_test_sig, .fips_allowed = 1, .suite = { - .cipher = __VECS(aes_ofb_tv_template) + .sig = __VECS(p1363_ecdsa_nist_p256_tv_template) } }, { - /* Same as ofb(aes) except the key is stored in - * hardware secure memory which we reference by index - */ - .alg = "ofb(paes)", + .alg = "p1363(ecdsa-nist-p384)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "ofb(sm4)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(sm4_ofb_tv_template) - } + .alg = "p1363(ecdsa-nist-p521)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "pcbc(fcrypt)", .test = alg_test_skcipher, @@ -5204,31 +5203,77 @@ static const struct alg_test_desc alg_test_descs[] = { .cipher = __VECS(fcrypt_pcbc_tv_template) } }, { - .alg = "pkcs1pad(rsa,sha224)", - .test = alg_test_null, +#if IS_ENABLED(CONFIG_CRYPTO_PHMAC_S390) + .alg = "phmac(sha224)", + .test = alg_test_hash, .fips_allowed = 1, + .suite = { + .hash = __VECS(hmac_sha224_tv_template) + } }, { - .alg = "pkcs1pad(rsa,sha256)", - .test = alg_test_akcipher, + .alg = "phmac(sha256)", + .test = alg_test_hash, .fips_allowed = 1, .suite = { - .akcipher = __VECS(pkcs1pad_rsa_tv_template) + .hash = __VECS(hmac_sha256_tv_template) } }, { - .alg = "pkcs1pad(rsa,sha384)", - .test = alg_test_null, + .alg = "phmac(sha384)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = __VECS(hmac_sha384_tv_template) + } + }, { + .alg = "phmac(sha512)", + .test = alg_test_hash, .fips_allowed = 1, + .suite = { + .hash = __VECS(hmac_sha512_tv_template) + } }, { - .alg = "pkcs1pad(rsa,sha512)", +#endif + .alg = "pkcs1(rsa,none)", + .test = alg_test_sig, + .suite = { + .sig = __VECS(pkcs1_rsa_none_tv_template) + } + }, { + .alg = "pkcs1(rsa,sha224)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "poly1305", - .test = alg_test_hash, + .alg = "pkcs1(rsa,sha256)", + .test = alg_test_sig, + .fips_allowed = 1, .suite = { - .hash = __VECS(poly1305_tv_template) + .sig = __VECS(pkcs1_rsa_tv_template) } }, { + .alg = "pkcs1(rsa,sha3-256)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1(rsa,sha3-384)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1(rsa,sha3-512)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1(rsa,sha384)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1(rsa,sha512)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1pad(rsa)", + .test = alg_test_null, + .fips_allowed = 1, + }, { .alg = "rfc3686(ctr(aes))", .test = alg_test_skcipher, .fips_allowed = 1, @@ -5278,12 +5323,14 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "rfc7539(chacha20,poly1305)", + .generic_driver = "rfc7539(chacha20-lib,poly1305-generic)", .test = alg_test_aead, .suite = { .aead = __VECS(rfc7539_tv_template) } }, { .alg = "rfc7539esp(chacha20,poly1305)", + .generic_driver = "rfc7539esp(chacha20-lib,poly1305-generic)", .test = alg_test_aead, .suite = { .aead = { @@ -5307,6 +5354,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha1", + .generic_driver = "sha1-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5314,6 +5362,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha224", + .generic_driver = "sha224-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5321,6 +5370,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha256", + .generic_driver = "sha256-lib", .test = 
alg_test_hash, .fips_allowed = 1, .suite = { @@ -5328,6 +5378,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha3-224", + .generic_driver = "sha3-224-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5335,6 +5386,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha3-256", + .generic_driver = "sha3-256-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5342,6 +5394,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha3-384", + .generic_driver = "sha3-384-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5349,6 +5402,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha3-512", + .generic_driver = "sha3-512-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5356,6 +5410,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha384", + .generic_driver = "sha384-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -5363,18 +5418,13 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "sha512", + .generic_driver = "sha512-lib", .test = alg_test_hash, .fips_allowed = 1, .suite = { .hash = __VECS(sha512_tv_template) } }, { - .alg = "sm2", - .test = alg_test_akcipher, - .suite = { - .akcipher = __VECS(sm2_tv_template) - } - }, { .alg = "sm3", .test = alg_test_hash, .suite = { @@ -5393,12 +5443,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(streebog512_tv_template) } }, { - .alg = "vmac64(aes)", - .test = alg_test_hash, - .suite = { - .hash = __VECS(vmac64_aes_tv_template) - } - }, { .alg = "wp256", .test = alg_test_hash, .suite = { @@ -5417,24 +5461,65 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(wp512_tv_template) } }, { + .alg = "x962(ecdsa-nist-p192)", + .test = alg_test_sig, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p192_tv_template) + } + }, { + .alg = "x962(ecdsa-nist-p256)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p256_tv_template) + } + }, { + .alg = "x962(ecdsa-nist-p384)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p384_tv_template) + } + }, { + .alg = "x962(ecdsa-nist-p521)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p521_tv_template) + } + }, { .alg = "xcbc(aes)", .test = alg_test_hash, .suite = { .hash = __VECS(aes_xcbc128_tv_template) } }, { + .alg = "xcbc(sm4)", + .test = alg_test_hash, + .suite = { + .hash = __VECS(sm4_xcbc128_tv_template) + } + }, { .alg = "xchacha12", + .generic_driver = "xchacha12-lib", .test = alg_test_skcipher, .suite = { .cipher = __VECS(xchacha12_tv_template) }, }, { .alg = "xchacha20", + .generic_driver = "xchacha20-lib", .test = alg_test_skcipher, .suite = { .cipher = __VECS(xchacha20_tv_template) }, }, { + .alg = "xctr(aes)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aes_xctr_tv_template) + } + }, { .alg = "xts(aes)", .generic_driver = "xts(ecb(aes-generic))", .test = alg_test_skcipher, @@ -5471,6 +5556,13 @@ static const struct alg_test_desc alg_test_descs[] = { .cipher = __VECS(serpent_xts_tv_template) } }, { + .alg = "xts(sm4)", + .generic_driver = "xts(ecb(sm4-generic))", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(sm4_xts_tv_template) + } + }, { .alg = "xts(twofish)", .generic_driver = "xts(ecb(twofish-generic))", .test = alg_test_skcipher, @@ -5487,14 +5579,6 @@ static const struct alg_test_desc alg_test_descs[] = { 
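/*
 * Aside (background, not from the patch): the x962(...) and p1363(...)
 * entries added above wrap the raw ecdsa-nist-* implementations in the
 * two standard ECDSA signature encodings:
 *
 *      p1363: fixed-width big-endian r || s
 *      x962:  ASN.1 DER  SEQUENCE { INTEGER r, INTEGER s }
 *
 * For a toy 4-byte field with r = 0x01020304 and s = 0x05060708:
 *
 *      p1363: 01 02 03 04 05 06 07 08
 *      x962:  30 0c 02 04 01 02 03 04 02 04 05 06 07 08
 *             |  |  '-- INTEGER, len 4 --' '-- INTEGER, len 4 --'
 *             |  '- total length 12
 *             '- SEQUENCE
 */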
} }, { #endif - .alg = "xts4096(paes)", - .test = alg_test_null, - .fips_allowed = 1, - }, { - .alg = "xts512(paes)", - .test = alg_test_null, - .fips_allowed = 1, - }, { .alg = "xxhash64", .test = alg_test_hash, .fips_allowed = 1, @@ -5502,16 +5586,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(xxhash64_tv_template) } }, { - .alg = "zlib-deflate", - .test = alg_test_comp, - .fips_allowed = 1, - .suite = { - .comp = { - .comp = __VECS(zlib_deflate_comp_tv_template), - .decomp = __VECS(zlib_deflate_decomp_tv_template) - } - } - }, { .alg = "zstd", .test = alg_test_comp, .fips_allowed = 1, @@ -5563,9 +5637,8 @@ static void testmgr_onetime_init(void) alg_check_test_descs_order(); alg_check_testvec_configs(); -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n"); -#endif + if (!noslowtests) + pr_warn("alg: full crypto tests enabled. This is intended for developer use only.\n"); } static int alg_find_test(const char *alg) @@ -5593,6 +5666,13 @@ static int alg_find_test(const char *alg) return -1; } +static int alg_fips_disabled(const char *driver, const char *alg) +{ + pr_info("alg: %s (%s) is disabled due to FIPS\n", alg, driver); + + return -ECANCELED; +} + int alg_test(const char *driver, const char *alg, u32 type, u32 mask) { int i; @@ -5629,9 +5709,13 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) if (i < 0 && j < 0) goto notest; - if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) || - (j >= 0 && !alg_test_descs[j].fips_allowed))) - goto non_fips_alg; + if (fips_enabled) { + if (j >= 0 && !alg_test_descs[j].fips_allowed) + return -EINVAL; + + if (i >= 0 && !alg_test_descs[i].fips_allowed) + goto non_fips_alg; + } rc = 0; if (i >= 0) @@ -5643,14 +5727,16 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) test_done: if (rc) { - if (fips_enabled || panic_on_fail) { + if (fips_enabled) { fips_fail_notify(); - panic("alg: self-tests for %s (%s) failed in %s mode!\n", - driver, alg, - fips_enabled ? 
"fips" : "panic_on_fail"); - } - WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)", - driver, alg, rc); + panic("alg: self-tests for %s (%s) failed in fips mode!\n", + driver, alg); + } + pr_warn("alg: self-tests for %s using %s failed (rc=%d)", + alg, driver, rc); + WARN(rc != -ENOENT, + "alg: self-tests for %s using %s failed (rc=%d)", + alg, driver, rc); } else { if (fips_enabled) pr_info("alg: self-tests for %s (%s) passed\n", @@ -5660,12 +5746,35 @@ test_done: return rc; notest: + if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_LSKCIPHER) { + char nalg[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >= + sizeof(nalg)) + goto notest2; + + i = alg_find_test(nalg); + if (i < 0) + goto notest2; + + if (fips_enabled && !alg_test_descs[i].fips_allowed) + goto non_fips_alg; + + rc = alg_test_skcipher(alg_test_descs + i, driver, type, mask); + goto test_done; + } + +notest2: printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); + + if (type & CRYPTO_ALG_FIPS_INTERNAL) + return alg_fips_disabled(driver, alg); + return 0; non_fips_alg: - return -EINVAL; + return alg_fips_disabled(driver, alg); } -#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ +#endif /* CONFIG_CRYPTO_SELFTESTS */ EXPORT_SYMBOL_GPL(alg_test); diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 3ed6ab34ab51..80bf5f1b67a6 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -21,6 +21,7 @@ #define _CRYPTO_TESTMGR_H #include <linux/oid_registry.h> +#include <crypto/internal/ecc.h> #define MAX_IVLEN 32 @@ -33,6 +34,7 @@ * @ksize: Length of @key in bytes (0 if no key) * @setkey_error: Expected error from setkey() * @digest_error: Expected error from digest() + * @fips_skip: Skip the test vector in FIPS mode */ struct hash_testvec { const char *key; @@ -42,6 +44,7 @@ struct hash_testvec { unsigned short ksize; int setkey_error; int digest_error; + bool fips_skip; }; /* @@ -56,8 +59,6 @@ struct hash_testvec { * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? * ( e.g. test needs to fail due to a weak key ) * @fips_skip: Skip the test vector in FIPS mode - * @generates_iv: Encryption should ignore the given IV, and output @iv_out. - * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)"). 
* @setkey_error: Expected error from setkey() * @crypt_error: Expected error from encrypt() and decrypt() */ @@ -71,7 +72,6 @@ struct cipher_testvec { unsigned short klen; unsigned int len; bool fips_skip; - bool generates_iv; int setkey_error; int crypt_error; }; @@ -119,18 +119,6 @@ struct aead_testvec { int crypt_error; }; -struct cprng_testvec { - const char *key; - const char *dt; - const char *v; - const char *result; - unsigned char klen; - unsigned short dtlen; - unsigned short vlen; - unsigned short rlen; - unsigned short loops; -}; - struct drbg_testvec { const unsigned char *entropy; size_t entropylen; @@ -148,6 +136,16 @@ struct drbg_testvec { struct akcipher_testvec { const unsigned char *key; + const unsigned char *m; + const unsigned char *c; + unsigned int key_len; + unsigned int m_size; + unsigned int c_size; + bool public_key_vec; +}; + +struct sig_testvec { + const unsigned char *key; const unsigned char *params; const unsigned char *m; const unsigned char *c; @@ -156,7 +154,6 @@ struct akcipher_testvec { unsigned int m_size; unsigned int c_size; bool public_key_vec; - bool siggen_sigver_test; enum OID algo; }; @@ -183,8 +180,8 @@ static const struct akcipher_testvec rsa_tv_template[] = { { #ifndef CONFIG_CRYPTO_FIPS .key = - "\x30\x81\x9A" /* sequence of 154 bytes */ - "\x02\x01\x01" /* version - integer of 1 byte */ + "\x30\x82\x01\x38" /* sequence of 312 bytes */ + "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x41" /* modulus - integer of 65 bytes */ "\x00\xAA\x36\xAB\xCE\x88\xAC\xFD\xFF\x55\x52\x3C\x7F\xC4\x52\x3F" "\x90\xEF\xA0\x0D\xF3\x77\x4A\x25\x9F\x2E\x62\xB4\xC5\xD9\x9C\xB5" @@ -197,24 +194,37 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\xC2\xCD\x2D\xFF\x43\x40\x98\xCD\x20\xD8\xA1\x38\xD0\x90\xBF\x64" "\x79\x7C\x3F\xA7\xA2\xCD\xCB\x3C\xD1\xE0\xBD\xBA\x26\x54\xB4\xF9" "\xDF\x8E\x8A\xE5\x9D\x73\x3D\x9F\x33\xB3\x01\x62\x4A\xFD\x1D\x51" - "\x02\x01\x00" /* prime1 - integer of 1 byte */ - "\x02\x01\x00" /* prime2 - integer of 1 byte */ - "\x02\x01\x00" /* exponent1 - integer of 1 byte */ - "\x02\x01\x00" /* exponent2 - integer of 1 byte */ - "\x02\x01\x00", /* coefficient - integer of 1 byte */ + "\x02\x21" /* prime1 - integer of 33 bytes */ + "\x00\xD8\x40\xB4\x16\x66\xB4\x2E\x92\xEA\x0D\xA3\xB4\x32\x04\xB5" + "\xCF\xCE\x33\x52\x52\x4D\x04\x16\xA5\xA4\x41\xE7\x00\xAF\x46\x12" + "\x0D" + "\x02\x21" /* prime2 - integer of 33 bytes */ + "\x00\xC9\x7F\xB1\xF0\x27\xF4\x53\xF6\x34\x12\x33\xEA\xAA\xD1\xD9" + "\x35\x3F\x6C\x42\xD0\x88\x66\xB1\xD0\x5A\x0F\x20\x35\x02\x8B\x9D" + "\x89" + "\x02\x20" /* exponent1 - integer of 32 bytes */ + "\x59\x0B\x95\x72\xA2\xC2\xA9\xC4\x06\x05\x9D\xC2\xAB\x2F\x1D\xAF" + "\xEB\x7E\x8B\x4F\x10\xA7\x54\x9E\x8E\xED\xF5\xB4\xFC\xE0\x9E\x05" + "\x02\x21" /* exponent2 - integer of 33 bytes */ + "\x00\x8E\x3C\x05\x21\xFE\x15\xE0\xEA\x06\xA3\x6F\xF0\xF1\x0C\x99" + "\x52\xC3\x5B\x7A\x75\x14\xFD\x32\x38\xB8\x0A\xAD\x52\x98\x62\x8D" + "\x51" + "\x02\x20" /* coefficient - integer of 32 bytes */ + "\x36\x3F\xF7\x18\x9D\xA8\xE9\x0B\x1D\x34\x1F\x71\xD0\x9B\x76\xA8" + "\xA9\x43\xE1\x1D\x10\xB2\x4D\x24\x9F\x2D\xEA\xFE\xF8\x0C\x18\x26", .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", .c = "\x63\x1c\xcd\x7b\xe1\x7e\xe4\xde\xc9\xa8\x89\xa1\x74\xcb\x3c\x63" "\x7d\x24\xec\x83\xc3\x15\xe4\x7f\x73\x05\x34\xd1\xec\x22\xbb\x8a" "\x5e\x32\x39\x6d\xc1\x1d\x7d\x50\x3b\x9f\x7a\xad\xf0\x2e\x25\x53" "\x9f\x6e\xbd\x4c\x55\x84\x0c\x9b\xcf\x1a\x4b\x51\x1e\x9e\x0c\x06", - .key_len = 157, + .key_len = 316, .m_size = 8, .c_size = 64, }, { .key = - 
"\x30\x82\x01\x1D" /* sequence of 285 bytes */ - "\x02\x01\x01" /* version - integer of 1 byte */ + "\x30\x82\x02\x5B" /* sequence of 603 bytes */ + "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x81\x81" /* modulus - integer of 129 bytes */ "\x00\xBB\xF8\x2F\x09\x06\x82\xCE\x9C\x23\x38\xAC\x2B\x9D\xA8\x71" "\xF7\x36\x8D\x07\xEE\xD4\x10\x43\xA4\x40\xD6\xB6\xF0\x74\x54\xF5" @@ -236,12 +246,35 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\x93\x99\x26\xED\x4F\x74\xA1\x3E\xDD\xFB\xE1\xA1\xCE\xCC\x48\x94" "\xAF\x94\x28\xC2\xB7\xB8\x88\x3F\xE4\x46\x3A\x4B\xC8\x5B\x1C\xB3" "\xC1" - "\x02\x01\x00" /* prime1 - integer of 1 byte */ - "\x02\x01\x00" /* prime2 - integer of 1 byte */ - "\x02\x01\x00" /* exponent1 - integer of 1 byte */ - "\x02\x01\x00" /* exponent2 - integer of 1 byte */ - "\x02\x01\x00", /* coefficient - integer of 1 byte */ - .key_len = 289, + "\x02\x41" /* prime1 - integer of 65 bytes */ + "\x00\xEE\xCF\xAE\x81\xB1\xB9\xB3\xC9\x08\x81\x0B\x10\xA1\xB5\x60" + "\x01\x99\xEB\x9F\x44\xAE\xF4\xFD\xA4\x93\xB8\x1A\x9E\x3D\x84\xF6" + "\x32\x12\x4E\xF0\x23\x6E\x5D\x1E\x3B\x7E\x28\xFA\xE7\xAA\x04\x0A" + "\x2D\x5B\x25\x21\x76\x45\x9D\x1F\x39\x75\x41\xBA\x2A\x58\xFB\x65" + "\x99" + "\x02\x41" /* prime2 - integer of 65 bytes */ + "\x00\xC9\x7F\xB1\xF0\x27\xF4\x53\xF6\x34\x12\x33\xEA\xAA\xD1\xD9" + "\x35\x3F\x6C\x42\xD0\x88\x66\xB1\xD0\x5A\x0F\x20\x35\x02\x8B\x9D" + "\x86\x98\x40\xB4\x16\x66\xB4\x2E\x92\xEA\x0D\xA3\xB4\x32\x04\xB5" + "\xCF\xCE\x33\x52\x52\x4D\x04\x16\xA5\xA4\x41\xE7\x00\xAF\x46\x15" + "\x03" + "\x02\x40" /* exponent1 - integer of 64 bytes */ + "\x54\x49\x4C\xA6\x3E\xBA\x03\x37\xE4\xE2\x40\x23\xFC\xD6\x9A\x5A" + "\xEB\x07\xDD\xDC\x01\x83\xA4\xD0\xAC\x9B\x54\xB0\x51\xF2\xB1\x3E" + "\xD9\x49\x09\x75\xEA\xB7\x74\x14\xFF\x59\xC1\xF7\x69\x2E\x9A\x2E" + "\x20\x2B\x38\xFC\x91\x0A\x47\x41\x74\xAD\xC9\x3C\x1F\x67\xC9\x81" + "\x02\x40" /* exponent2 - integer of 64 bytes */ + "\x47\x1E\x02\x90\xFF\x0A\xF0\x75\x03\x51\xB7\xF8\x78\x86\x4C\xA9" + "\x61\xAD\xBD\x3A\x8A\x7E\x99\x1C\x5C\x05\x56\xA9\x4C\x31\x46\xA7" + "\xF9\x80\x3F\x8F\x6F\x8A\xE3\x42\xE9\x31\xFD\x8A\xE4\x7A\x22\x0D" + "\x1B\x99\xA4\x95\x84\x98\x07\xFE\x39\xF9\x24\x5A\x98\x36\xDA\x3D" + "\x02\x41" /* coefficient - integer of 65 bytes */ + "\x00\xB0\x6C\x4F\xDA\xBB\x63\x01\x19\x8D\x26\x5B\xDB\xAE\x94\x23" + "\xB3\x80\xF2\x71\xF7\x34\x53\x88\x50\x93\x07\x7F\xCD\x39\xE2\x11" + "\x9F\xC9\x86\x32\x15\x4F\x58\x83\xB1\x67\xA9\x67\xBF\x40\x2B\x4E" + "\x9E\x2E\x0F\x96\x56\xE6\x98\xEA\x36\x66\xED\xFB\x25\x79\x80\x39" + "\xF7", + .key_len = 607, .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", .c = "\x74\x1b\x55\xac\x47\xb5\x08\x0a\x6e\x2b\x2d\xf7\x94\xb8\x8a\x95" @@ -257,9 +290,9 @@ static const struct akcipher_testvec rsa_tv_template[] = { }, { #endif .key = - "\x30\x82\x02\x1F" /* sequence of 543 bytes */ - "\x02\x01\x01" /* version - integer of 1 byte */ - "\x02\x82\x01\x00" /* modulus - integer of 256 bytes */ + "\x30\x82\x04\xA3" /* sequence of 1187 bytes */ + "\x02\x01\x00" /* version - integer of 1 byte */ + "\x02\x82\x01\x01\x00" /* modulus - integer of 256 bytes */ "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" "\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA" "\xC6\x67\xFF\x1D\x1E\x3C\x1D\xC1\xB5\x5F\x6C\xC0\xB2\x07\x3A\x6D" @@ -294,12 +327,55 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\x62\xFF\xE9\x46\xB8\xD8\x44\xDB\xA5\xCC\x31\x54\x34\xCE\x3E\x82" "\xD6\xBF\x7A\x0B\x64\x21\x6D\x88\x7E\x5B\x45\x12\x1E\x63\x8D\x49" 
"\xA7\x1D\xD9\x1E\x06\xCD\xE8\xBA\x2C\x8C\x69\x32\xEA\xBE\x60\x71" - "\x02\x01\x00" /* prime1 - integer of 1 byte */ - "\x02\x01\x00" /* prime2 - integer of 1 byte */ - "\x02\x01\x00" /* exponent1 - integer of 1 byte */ - "\x02\x01\x00" /* exponent2 - integer of 1 byte */ - "\x02\x01\x00", /* coefficient - integer of 1 byte */ - .key_len = 547, + "\x02\x81\x81" /* prime1 - integer of 129 bytes */ + "\x00\xFA\xAC\xE1\x37\x5E\x32\x11\x34\xC6\x72\x58\x2D\x91\x06\x3E" + "\x77\xE7\x11\x21\xCD\x4A\xF8\xA4\x3F\x0F\xEF\x31\xE3\xF3\x55\xA0" + "\xB9\xAC\xB6\xCB\xBB\x41\xD0\x32\x81\x9A\x8F\x7A\x99\x30\x77\x6C" + "\x68\x27\xE2\x96\xB5\x72\xC9\xC3\xD4\x42\xAA\xAA\xCA\x95\x8F\xFF" + "\xC9\x9B\x52\x34\x30\x1D\xCF\xFE\xCF\x3C\x56\x68\x6E\xEF\xE7\x6C" + "\xD7\xFB\x99\xF5\x4A\xA5\x21\x1F\x2B\xEA\x93\xE8\x98\x26\xC4\x6E" + "\x42\x21\x5E\xA0\xA1\x2A\x58\x35\xBB\x10\xE7\xBA\x27\x0A\x3B\xB3" + "\xAF\xE2\x75\x36\x04\xAC\x56\xA0\xAB\x52\xDE\xCE\xDD\x2C\x28\x77" + "\x03" + "\x02\x81\x81" /* prime2 - integer of 129 bytes */ + "\x00\xDF\xB7\x52\xB6\xD7\xC0\xE2\x96\xE7\xC9\xFE\x5D\x71\x5A\xC4" + "\x40\x96\x2F\xE5\x87\xEA\xF3\xA5\x77\x11\x67\x3C\x8D\x56\x08\xA7" + "\xB5\x67\xFA\x37\xA8\xB8\xCF\x61\xE8\x63\xD8\x38\x06\x21\x2B\x92" + "\x09\xA6\x39\x3A\xEA\xA8\xB4\x45\x4B\x36\x10\x4C\xE4\x00\x66\x71" + "\x65\xF8\x0B\x94\x59\x4F\x8C\xFD\xD5\x34\xA2\xE7\x62\x84\x0A\xA7" + "\xBB\xDB\xD9\x8A\xCD\x05\xE1\xCC\x57\x7B\xF1\xF1\x1F\x11\x9D\xBA" + "\x3E\x45\x18\x99\x1B\x41\x64\x43\xEE\x97\x5D\x77\x13\x5B\x74\x69" + "\x73\x87\x95\x05\x07\xBE\x45\x07\x17\x7E\x4A\x69\x22\xF3\xDB\x05" + "\x39" + "\x02\x81\x80" /* exponent1 - integer of 128 bytes */ + "\x5E\xD8\xDC\xDA\x53\x44\xC4\x67\xE0\x92\x51\x34\xE4\x83\xA5\x4D" + "\x3E\xDB\xA7\x9B\x82\xBB\x73\x81\xFC\xE8\x77\x4B\x15\xBE\x17\x73" + "\x49\x9B\x5C\x98\xBC\xBD\x26\xEF\x0C\xE9\x2E\xED\x19\x7E\x86\x41" + "\x1E\x9E\x48\x81\xDD\x2D\xE4\x6F\xC2\xCD\xCA\x93\x9E\x65\x7E\xD5" + "\xEC\x73\xFD\x15\x1B\xA2\xA0\x7A\x0F\x0D\x6E\xB4\x53\x07\x90\x92" + "\x64\x3B\x8B\xA9\x33\xB3\xC5\x94\x9B\x4C\x5D\x9C\x7C\x46\xA4\xA5" + "\x56\xF4\xF3\xF8\x27\x0A\x7B\x42\x0D\x92\x70\x47\xE7\x42\x51\xA9" + "\xC2\x18\xB1\x58\xB1\x50\x91\xB8\x61\x41\xB6\xA9\xCE\xD4\x7C\xBB" + "\x02\x81\x80" /* exponent2 - integer of 128 bytes */ + "\x54\x09\x1F\x0F\x03\xD8\xB6\xC5\x0C\xE8\xB9\x9E\x0C\x38\x96\x43" + "\xD4\xA6\xC5\x47\xDB\x20\x0E\xE5\xBD\x29\xD4\x7B\x1A\xF8\x41\x57" + "\x49\x69\x9A\x82\xCC\x79\x4A\x43\xEB\x4D\x8B\x2D\xF2\x43\xD5\xA5" + "\xBE\x44\xFD\x36\xAC\x8C\x9B\x02\xF7\x9A\x03\xE8\x19\xA6\x61\xAE" + "\x76\x10\x93\x77\x41\x04\xAB\x4C\xED\x6A\xCC\x14\x1B\x99\x8D\x0C" + "\x6A\x37\x3B\x86\x6C\x51\x37\x5B\x1D\x79\xF2\xA3\x43\x10\xC6\xA7" + "\x21\x79\x6D\xF9\xE9\x04\x6A\xE8\x32\xFF\xAE\xFD\x1C\x7B\x8C\x29" + "\x13\xA3\x0C\xB2\xAD\xEC\x6C\x0F\x8D\x27\x12\x7B\x48\xB2\xDB\x31" + "\x02\x81\x81" /* coefficient - integer of 129 bytes */ + "\x00\x8D\x1B\x05\xCA\x24\x1F\x0C\x53\x19\x52\x74\x63\x21\xFA\x78" + "\x46\x79\xAF\x5C\xDE\x30\xA4\x6C\x20\x38\xE6\x97\x39\xB8\x7A\x70" + "\x0D\x8B\x6C\x6D\x13\x74\xD5\x1C\xDE\xA9\xF4\x60\x37\xFE\x68\x77" + "\x5E\x0B\x4E\x5E\x03\x31\x30\xDF\xD6\xAE\x85\xD0\x81\xBB\x61\xC7" + "\xB1\x04\x5A\xC4\x6D\x56\x1C\xD9\x64\xE7\x85\x7F\x88\x91\xC9\x60" + "\x28\x05\xE2\xC6\x24\x8F\xDD\x61\x64\xD8\x09\xDE\x7E\xD3\x4A\x61" + "\x1A\xD3\x73\x58\x4B\xD8\xA0\x54\x25\x48\x83\x6F\x82\x6C\xAF\x36" + "\x51\x2A\x5D\x14\x2F\x41\x25\x00\xDD\xF8\xF3\x95\xFE\x31\x25\x50" + "\x12", + .key_len = 1191, .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", .c = "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe" @@ -566,26 
+642,713 @@ static const struct akcipher_testvec rsa_tv_template[] = { } }; +#ifdef CONFIG_CPU_BIG_ENDIAN +#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \ + 0x##b1, 0x##b2, 0x##b3, 0x##b4, 0x##b5, 0x##b6, 0x##b7, 0x##b8 +#else +#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \ + 0x##b8, 0x##b7, 0x##b6, 0x##b5, 0x##b4, 0x##b3, 0x##b2, 0x##b1 +#endif + /* * ECDSA test vectors. */ -static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { +static const struct sig_testvec ecdsa_nist_p192_tv_template[] = { { - .key = + .key = /* secp192r1(sha1) */ + "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" + "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" + "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" + "\x98", + .key_len = 49, + .m = + "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" + "\x63\x85\xe7\x82", + .m_size = 20, + .c = (const unsigned char[]){ + be64_to_cpua(ad, 59, ad, 88, 27, d6, 92, 6b), + be64_to_cpua(a0, 27, 91, c6, f6, 7f, c3, 09), + be64_to_cpua(ba, e5, 93, 83, 6e, b6, 3b, 63), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(86, 80, 6f, a5, 79, 77, da, d0), + be64_to_cpua(ef, 95, 52, 7b, a0, 0f, e4, 18), + be64_to_cpua(10, 68, 01, 9d, ba, ce, 83, 08), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha224) */ + "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" + "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" + "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" + "\xa3", + .key_len = 49, + .m = + "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40" + "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(83, 7b, 12, e6, b6, 5b, cb, d4), + be64_to_cpua(14, f8, 11, 2b, 55, dc, ae, 37), + be64_to_cpua(5a, 8b, 82, 69, 7e, 8a, 0a, 09), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(a3, e3, 5c, 99, db, 92, 5b, 36), + be64_to_cpua(eb, c3, 92, 0f, 1e, 72, ee, c4), + be64_to_cpua(6a, 14, 4f, 53, 75, c8, 02, 48), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha256) */ + "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae" + "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56" + "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58" + "\x91", + .key_len = 49, + .m = + 
"\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd" + "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(01, 48, fb, 5f, 72, 2a, d4, 8f), + be64_to_cpua(6b, 1a, 58, 56, f1, 8f, f7, fd), + be64_to_cpua(3f, 72, 3f, 1f, 42, d2, 3f, 1d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(7d, 3a, 97, d9, cd, 1a, 6a, 49), + be64_to_cpua(32, dd, 41, 74, 6a, 51, c7, d9), + be64_to_cpua(b3, 69, 43, fd, 48, 19, 86, cf), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha384) */ + "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7" + "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f" + "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e" + "\x8b", + .key_len = 49, + .m = + "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9" + "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61" + "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(dd, 15, bb, d6, 8c, a7, 03, 78), + be64_to_cpua(cf, 7f, 34, b4, b4, e5, c5, 00), + be64_to_cpua(f0, a3, 38, ce, 2b, f8, 9d, 1a), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(93, 12, 3b, 3b, 28, fb, 6d, e1), + be64_to_cpua(d1, 01, 77, 44, 5d, 53, a4, 7c), + be64_to_cpua(64, bc, 5a, 1f, 82, 96, 61, d7), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha512) */ + "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea" + "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d" + "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11" + "\x57", + .key_len = 49, + .m = + "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23" + "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67" + "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70" + "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(2b, 11, 2d, 1c, b6, 06, c9, 6c), + be64_to_cpua(dd, 3f, 07, 87, 12, a0, d4, ac), + be64_to_cpua(88, 5b, 8f, 59, 43, bf, cf, c6), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 
00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(28, 6a, df, 97, fd, 82, 76, 24), + be64_to_cpua(a9, 14, 2a, 5e, f5, e5, fb, 72), + be64_to_cpua(73, b4, 22, 9a, 98, 73, 3c, 83), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +static const struct sig_testvec ecdsa_nist_p256_tv_template[] = { + { + .key = /* secp256r1(sha1) */ + "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" + "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" + "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" + "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" + "\xaf", + .key_len = 65, + .m = + "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" + "\x0b\xde\x6a\x42", + .m_size = 20, + .c = (const unsigned char[]){ + be64_to_cpua(ee, ca, 6a, 52, 0e, 48, 4d, cc), + be64_to_cpua(f7, d4, ad, 8d, 94, 5a, 69, 89), + be64_to_cpua(cf, d4, e7, b7, f0, 82, 56, 41), + be64_to_cpua(f9, 25, ce, 9f, 3a, a6, 35, 81), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(fb, 9d, 8b, de, d4, 8d, 6f, ad), + be64_to_cpua(f1, 03, 03, f3, 3b, e2, 73, f7), + be64_to_cpua(8a, fa, 54, 93, 29, a7, 70, 86), + be64_to_cpua(d7, e4, ef, 52, 66, d3, 5b, 9d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha224) */ + "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" + "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" + "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" + "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0" + "\xd4", + .key_len = 65, + .m = + "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41" + "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(7d, 25, d8, 25, f5, 81, d2, 1e), + be64_to_cpua(34, 62, 79, cb, 6a, 91, 67, 2e), + be64_to_cpua(ae, ce, 77, 59, 1a, db, 59, d5), + be64_to_cpua(20, 43, fa, c0, 9f, 9d, 7b, e7), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(ce, d5, 2e, 8b, de, 5a, 04, 0e), + be64_to_cpua(bf, 50, 05, 58, 39, 0e, 26, 92), + be64_to_cpua(76, 20, 4a, 77, 22, ec, c8, 66), + be64_to_cpua(5f, f8, 74, f8, 57, d0, 5e, 54), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha256) */ + 
"\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" + "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" + "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" + "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" + "\xb8", + .key_len = 65, + .m = + "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" + "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(91, dc, 02, 67, dc, 0c, d0, 82), + be64_to_cpua(ac, 44, c3, e8, 24, 11, 2d, a4), + be64_to_cpua(09, dc, 29, 63, a8, 1a, ad, fc), + be64_to_cpua(08, 31, fa, 74, 0d, 1d, 21, 5d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(4f, 2a, 65, 35, 23, e3, 1d, fa), + be64_to_cpua(0a, 6e, 1b, c4, af, e1, 83, c3), + be64_to_cpua(f9, a9, 81, ac, 4a, 50, d0, 91), + be64_to_cpua(bd, ff, ce, ee, 42, c3, 97, ff), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha384) */ + "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d" + "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e" + "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00" + "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2" + "\x7c", + .key_len = 65, + .m = + "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6" + "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94" + "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(f2, e4, 6c, c7, 94, b1, d5, fe), + be64_to_cpua(08, b2, 6b, 24, 94, 48, 46, 5e), + be64_to_cpua(d0, 2e, 95, 54, d1, 95, 64, 93), + be64_to_cpua(8e, f3, 6f, dc, f8, 69, a6, 2e), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(c0, 60, 11, 92, dc, 17, 89, 12), + be64_to_cpua(69, f4, 3b, 4f, 47, cf, 9b, 16), + be64_to_cpua(19, fb, 5f, 92, f4, c9, 23, 37), + be64_to_cpua(eb, a7, 80, 26, dc, f9, 3a, 44), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha512) */ + "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a" + "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38" + "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b" + "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92" + "\xbf", + .key_len = 65, + .m = + "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9" + "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca" + "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf" + 
"\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(76, f6, 04, 99, 09, 37, 4d, fa), + be64_to_cpua(ed, 8c, 73, 30, 6c, 22, b3, 97), + be64_to_cpua(40, ea, 44, 81, 00, 4e, 29, 08), + be64_to_cpua(b8, 6d, 87, 81, 43, df, fb, 9f), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(76, 31, 79, 4a, e9, 81, 6a, ee), + be64_to_cpua(5c, ad, c3, 78, 1c, c2, c1, 19), + be64_to_cpua(f8, 00, dd, ab, d4, c0, 2b, e6), + be64_to_cpua(1e, b9, 75, 31, f6, 04, a5, 4d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +static const struct sig_testvec ecdsa_nist_p384_tv_template[] = { + { + .key = /* secp384r1(sha1) */ + "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" + "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" + "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" + "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" + "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" + "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" + "\xf1", + .key_len = 97, + .m = + "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" + "\x3a\x69\xc1\x93", + .m_size = 20, + .c = (const unsigned char[]){ + be64_to_cpua(ec, 7c, 7e, d0, 87, d7, d7, 6e), + be64_to_cpua(78, f1, 4c, 26, e6, 5b, 86, cf), + be64_to_cpua(3a, c6, f1, 32, 3c, ce, 70, 2b), + be64_to_cpua(8d, 26, 8e, ae, 63, 3f, bc, 20), + be64_to_cpua(57, 55, 07, 20, 43, 30, de, a0), + be64_to_cpua(f5, 0f, 24, 4c, 07, 93, 6f, 21), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(79, 12, 2a, b7, c5, 15, 92, c5), + be64_to_cpua(4a, a1, 59, f1, 1c, a4, 58, 26), + be64_to_cpua(74, a0, 0f, bf, af, c3, 36, 76), + be64_to_cpua(df, 28, 8c, 1b, fa, f9, 95, 88), + be64_to_cpua(5f, 63, b1, be, 5e, 4c, 0e, a1), + be64_to_cpua(cd, bb, 7e, 81, 5d, 8f, 63, c0), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha224) */ + "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" + "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" + "\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05" + "\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc" + "\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50" + "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d" + "\xe0", + .key_len = 97, + .m = + "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd" + "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(3f, dd, 15, 1b, 68, 2b, 9d, 8b), + be64_to_cpua(c9, 9c, 11, b8, 10, 01, c5, 41), + be64_to_cpua(c5, da, b4, e3, 93, 07, e0, 99), + be64_to_cpua(97, f1, c8, 72, 26, cf, 5a, 5e), + be64_to_cpua(ec, cb, e4, 89, 47, b2, 
f7, bc), + be64_to_cpua(8a, 51, 84, ce, 13, 1e, d2, dc), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(88, 2b, 82, 26, 5e, 1c, da, fb), + be64_to_cpua(9f, 19, d0, 42, 8b, 93, c2, 11), + be64_to_cpua(4d, d0, c6, 6e, b0, e9, fc, 14), + be64_to_cpua(df, d8, 68, a2, 64, 42, 65, f3), + be64_to_cpua(4b, 00, 08, 31, 6c, f5, d5, f6), + be64_to_cpua(8b, 03, 2c, fc, 1f, d1, a9, a4), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha256) */ + "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a" + "\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a" + "\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e" + "\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4" + "\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52" + "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc" + "\xab", + .key_len = 97, + .m = + "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3" + "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(c8, 8d, 2c, 79, 3a, 8e, 32, c4), + be64_to_cpua(b6, c6, fc, 70, 2e, 66, 3c, 77), + be64_to_cpua(af, 06, 3f, 84, 04, e2, f9, 67), + be64_to_cpua(cc, 47, 53, 87, bc, bd, 83, 3f), + be64_to_cpua(8e, 3f, 7e, ce, 0a, 9b, aa, 59), + be64_to_cpua(08, 09, 12, 9d, 6e, 96, 64, a6), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(10, 0e, f4, 1f, 39, ca, 4d, 43), + be64_to_cpua(4f, 8d, de, 1e, 93, 8d, 95, bb), + be64_to_cpua(15, 68, c0, 75, 3e, 23, 5e, 36), + be64_to_cpua(dd, ce, bc, b2, 97, f4, 9c, f3), + be64_to_cpua(26, a2, b0, 89, 42, 0a, da, d9), + be64_to_cpua(40, 34, b8, 90, a9, 80, ab, 47), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha384) */ + "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f" + "\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9" + "\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c" + "\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a" + "\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9" + "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89" + "\x9e", + .key_len = 97, + .m = + "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf" + "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1" + "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(a2, a4, c8, f2, ea, 9d, 11, 1f), + be64_to_cpua(3b, 1f, 07, 8f, 15, 02, fe, 1d), + be64_to_cpua(29, e6, fb, ca, 8c, d6, b6, b4), + be64_to_cpua(2d, 7a, 91, 5f, 49, 2d, 22, 08), + be64_to_cpua(ee, 2e, 62, 35, 46, fa, 00, d8), + be64_to_cpua(9b, 28, 68, c0, a1, ea, 8c, 50), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(ab, 8d, 4e, de, e6, 6d, 9b, 66), + be64_to_cpua(96, 17, 04, c9, 05, 77, 
f1, 8e), + be64_to_cpua(44, 92, 8c, 86, 99, 65, b3, 97), + be64_to_cpua(71, cd, 8f, 18, 99, f0, 0f, 13), + be64_to_cpua(bf, e3, 75, 24, 49, ac, fb, c8), + be64_to_cpua(fc, 50, f6, 43, bd, 50, 82, 0e), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha512) */ + "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46" + "\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4" + "\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95" + "\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe" + "\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa" + "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac" + "\xa3", + .key_len = 97, + .m = + "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1" + "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71" + "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc" + "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(3e, b3, c7, a8, b3, 17, 77, d1), + be64_to_cpua(dc, 2b, 43, 0e, 6a, b3, 53, 6f), + be64_to_cpua(4c, fc, 6f, 80, e3, af, b3, d9), + be64_to_cpua(9a, 02, de, 93, e8, 83, e4, 84), + be64_to_cpua(4d, c6, ef, da, 02, e7, 0f, 52), + be64_to_cpua(00, 1d, 20, 94, 77, fe, 31, fa), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(4e, 45, cf, 3c, 93, ff, 50, 5d), + be64_to_cpua(34, e4, 8b, 80, a5, b6, da, 2c), + be64_to_cpua(c4, 6a, 03, 5f, 8d, 7a, f9, fb), + be64_to_cpua(ec, 63, e3, 0c, ec, 50, dc, cc), + be64_to_cpua(de, 3a, 3d, 16, af, b4, 52, 6a), + be64_to_cpua(63, f6, f0, 3d, 5f, 5f, 99, 3f), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +static const struct sig_testvec ecdsa_nist_p521_tv_template[] = { + { + .key = /* secp521r1(sha224) */ + "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0" + "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7" + "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61" + "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb" + "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a" + "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76" + "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47" + "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f" + "\x3b\x83\x82\x2f\x14", + .key_len = 133, + .m = + "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4" + "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(46, 6b, c7, af, 7a, b9, 19, 0a), + be64_to_cpua(6c, a6, 9b, 89, 8b, 1e, fd, 09), + be64_to_cpua(98, 85, 29, 88, ff, 0b, 94, 94), + be64_to_cpua(18, c6, 37, 8a, cb, a7, d8, 7d), + be64_to_cpua(f8, 3f, 59, 0f, 74, f0, 3f, d8), + be64_to_cpua(e2, ef, 07, 92, ee, 60, 94, 06), + be64_to_cpua(35, f6, dc, 6d, 02, 7b, 22, ac), + be64_to_cpua(d6, 43, e7, ff, 42, b2, ba, 74), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 01), + be64_to_cpua(50, b1, a5, 98, 92, 2a, a5, 52), + be64_to_cpua(1c, ad, 22, da, 82, 00, 35, 
a3), + be64_to_cpua(0e, 64, cc, c4, e8, 43, d9, 0e), + be64_to_cpua(30, 90, 0f, 1c, 8f, 78, d3, 9f), + be64_to_cpua(26, 0b, 5f, 49, 32, 6b, 91, 99), + be64_to_cpua(0f, f8, 65, 97, 6b, 09, 4d, 22), + be64_to_cpua(5e, f9, 88, f3, d2, 32, 90, 57), + be64_to_cpua(26, 0d, 55, cd, 23, 1e, 7d, a0), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 3a) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha256) */ + "\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae" + "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14" + "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36" + "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81" + "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c" + "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09" + "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05" + "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3" + "\xe6\x77\x1e\x1f\x8a", + .key_len = 133, + .m = + "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39" + "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(de, 7e, d7, 59, 10, e9, d9, d5), + be64_to_cpua(38, 1f, 46, 0b, 04, 64, 34, 79), + be64_to_cpua(ae, ce, 54, 76, 9a, c2, 8f, b8), + be64_to_cpua(95, 35, 6f, 02, 0e, af, e1, 4c), + be64_to_cpua(56, 3c, f6, f0, d8, e1, b7, 5d), + be64_to_cpua(50, 9f, 7d, 1f, ca, 8b, a8, 2d), + be64_to_cpua(06, 0f, fd, 83, fc, 0e, d9, ce), + be64_to_cpua(a5, 5f, 57, 52, 27, 78, 3a, b5), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, cd), + be64_to_cpua(55, 38, b6, f6, 34, 65, c7, bd), + be64_to_cpua(1c, 57, 56, 8f, 12, b7, 1d, 91), + be64_to_cpua(03, 42, 02, 5f, 50, f0, a2, 0d), + be64_to_cpua(fa, 10, dd, 9b, fb, 36, 1a, 31), + be64_to_cpua(e0, 87, 2c, 44, 4b, 5a, ee, af), + be64_to_cpua(a9, 79, 24, b9, 37, 35, dd, a0), + be64_to_cpua(6b, 35, ae, 65, b5, 99, 12, 0a), + be64_to_cpua(50, 85, 38, f9, 15, 83, 18, 04), + be64_to_cpua(00, 00, 00, 00, 00, 00, 01, cf) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha384) */ + "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b" + "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33" + "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4" + "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c" + "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47" + "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14" + "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46" + "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39" + "\xb3\x3b\x5b\x1b\x94", + .key_len = 133, + .m = + "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26" + "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59" + "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(b8, 6a, dd, fb, e6, 63, 4e, 28), + be64_to_cpua(84, 59, fd, 1a, c4, 40, dd, 43), + be64_to_cpua(32, 76, 06, d0, f9, c0, e4, e6), + be64_to_cpua(e4, df, 9b, 7d, 9e, 47, ca, 33), + be64_to_cpua(7e, 42, 71, 86, 57, 2d, f1, 7d), + be64_to_cpua(f2, 4b, 64, 98, f7, ec, da, c7), + be64_to_cpua(ec, 51, dc, e8, 35, 5e, ae, 16), + be64_to_cpua(96, 76, 3c, 27, ea, aa, 9c, 26), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 93), + be64_to_cpua(c6, 4f, ab, 2b, 62, c1, 42, b1), + be64_to_cpua(e5, 
5a, 94, 56, cf, 8f, b4, 22), + be64_to_cpua(6a, c3, f3, 7a, d1, fa, e7, a7), + be64_to_cpua(df, c4, c0, db, 54, db, 8a, 0d), + be64_to_cpua(da, a7, cd, 26, 28, 76, 3b, 52), + be64_to_cpua(e4, 3c, bc, 93, 65, 57, 1c, 30), + be64_to_cpua(55, ce, 37, 97, c9, 05, 51, e5), + be64_to_cpua(c3, 6a, 87, 6e, b5, 13, 1f, 20), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, ff) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha512) */ + "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f" + "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a" + "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f" + "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee" + "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e" + "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6" + "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77" + "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae" + "\xd0\x17\xdf\x49\x6a", + .key_len = 133, + .m = + "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69" + "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed" + "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3" + "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(28, b5, 04, b0, b6, 33, 1c, 7e), + be64_to_cpua(80, a6, 13, fc, b6, 90, f7, bb), + be64_to_cpua(27, 93, e8, 6c, 49, 7d, 28, fc), + be64_to_cpua(1f, 12, 3e, b7, 7e, 51, ff, 7f), + be64_to_cpua(fb, 62, 1e, 42, 03, 6c, 74, 8a), + be64_to_cpua(63, 0e, 02, cc, 94, a9, 05, b9), + be64_to_cpua(aa, 86, ec, a8, 05, 03, 52, 56), + be64_to_cpua(71, 86, 96, ac, 21, 33, 7e, 4e), + be64_to_cpua(00, 00, 00, 00, 00, 00, 01, 5c), + be64_to_cpua(46, 1e, 77, 44, 78, e0, d1, 04), + be64_to_cpua(72, 74, 13, 63, 39, a6, e5, 25), + be64_to_cpua(00, 55, bb, 6a, b4, 73, 00, d2), + be64_to_cpua(71, d0, e9, ca, a7, c0, cb, aa), + be64_to_cpua(7a, 76, 37, 51, 47, 49, 98, 12), + be64_to_cpua(88, 05, 3e, 43, 39, 01, bd, b7), + be64_to_cpua(95, 35, 89, 4f, 41, 5f, 9e, 19), + be64_to_cpua(43, 52, 1d, e3, c6, bd, 5a, 40), + be64_to_cpua(00, 00, 00, 00, 00, 00, 01, 70) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +/* + * ECDSA X9.62 test vectors. + * + * Identical to ECDSA test vectors, except signature in "c" is X9.62 encoded. 
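+ *
+ * As a rough sketch of the difference: the raw ECDSA vectors store r and
+ * s as two fixed-size, zero-padded arrays of native-endian u64 digits,
+ * least-significant digit first, built with be64_to_cpua, so "c" is
+ * always ECC_MAX_BYTES * 2 long.  An X9.62 signature instead wraps the
+ * same pair in a DER SEQUENCE of two INTEGERs, e.g. for NIST P-192:
+ *
+ *   30 35         SEQUENCE, 0x35 bytes
+ *   02 19 00 <r>  INTEGER, 25 bytes (leading 0x00 as r's top bit is set)
+ *   02 18 <s>     INTEGER, 24 bytes
+ *
+ * which is why the "c" strings below begin with "\x30..." and c_size
+ * varies from vector to vector.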
+ */ +static const struct sig_testvec x962_ecdsa_nist_p192_tv_template[] = { + { + .key = /* secp192r1(sha1) */ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" "\x98", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" "\x63\x85\xe7\x82", .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, .c = "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91" "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10" @@ -593,23 +1356,17 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x80\x6f\xa5\x79\x77\xda\xd0", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp192r1(sha224) */ "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" "\xa3", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40" "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11", .m_size = 28, - .algo = OID_id_ecdsa_with_sha224, .c = "\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b" "\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14" @@ -617,23 +1374,17 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x5c\x99\xdb\x92\x5b\x36", .c_size = 54, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp192r1(sha256) */ "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae" "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56" "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58" "\x91", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd" "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7", .m_size = 32, - .algo = OID_id_ecdsa_with_sha256, .c = "\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56" "\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3" @@ -641,24 +1392,18 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x3a\x97\xd9\xcd\x1a\x6a\x49", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp192r1(sha384) */ "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7" "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f" "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e" "\x8b", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9" "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61" "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78", .m_size = 48, - .algo = OID_id_ecdsa_with_sha384, .c = "\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34" "\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64" @@ 
-666,25 +1411,19 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x12\x3b\x3b\x28\xfb\x6d\xe1", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp192r1(sha512) */ "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea" "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d" "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11" "\x57", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23" "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67" "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70" "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5", .m_size = 64, - .algo = OID_id_ecdsa_with_sha512, .c = "\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07" "\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73" @@ -692,28 +1431,22 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x6a\xdf\x97\xfd\x82\x76\x24", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, }; -static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { +static const struct sig_testvec x962_ecdsa_nist_p256_tv_template[] = { { - .key = + .key = /* secp256r1(sha1) */ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" "\xaf", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" "\x0b\xde\x6a\x42", .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, .c = "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7" "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a" @@ -722,24 +1455,18 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad", .c_size = 72, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha224) */ "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0" "\xd4", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41" "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0", .m_size = 28, - .algo = OID_id_ecdsa_with_sha224, .c = "\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59" "\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25" @@ -748,24 +1475,18 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\x2e\x8b\xde\x5a\x04\x0e", .c_size = 70, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha256) */ "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" 
"\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" "\xb8", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", .m_size = 32, - .algo = OID_id_ecdsa_with_sha256, .c = "\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63" "\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67" @@ -774,25 +1495,19 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\x2a\x65\x35\x23\xe3\x1d\xfa", .c_size = 71, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha384) */ "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d" "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e" "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00" "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2" "\x7c", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6" "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94" "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb", .m_size = 48, - .algo = OID_id_ecdsa_with_sha384, .c = "\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95" "\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c" @@ -801,26 +1516,20 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\xc0\x60\x11\x92\xdc\x17\x89\x12", .c_size = 72, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha512) */ "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a" "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38" "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b" "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92" "\xbf", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9" "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca" "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf" "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6", .m_size = 64, - .algo = OID_id_ecdsa_with_sha512, .c = "\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44" "\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04" @@ -829,11 +1538,10 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\x31\x79\x4a\xe9\x81\x6a\xee", .c_size = 71, .public_key_vec = true, - .siggen_sigver_test = true, }, }; -static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { +static const struct sig_testvec x962_ecdsa_nist_p384_tv_template[] = { { .key = /* secp384r1(sha1) */ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" @@ -844,15 +1552,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" "\xf1", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" 
"\x3a\x69\xc1\x93", .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, .c = "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07" "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1" @@ -863,7 +1566,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x79\x12\x2a\xb7\xc5\x15\x92\xc5", .c_size = 104, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha224) */ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" @@ -874,15 +1576,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d" "\xe0", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd" "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5", .m_size = 28, - .algo = OID_id_ecdsa_with_sha224, .c = "\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4" "\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4" @@ -893,7 +1590,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x88\x2b\x82\x26\x5e\x1c\xda\xfb", .c_size = 104, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha256) */ "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a" @@ -904,15 +1600,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc" "\xab", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3" "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2", .m_size = 32, - .algo = OID_id_ecdsa_with_sha256, .c = "\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce" "\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84" @@ -923,7 +1614,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\xf4\x1f\x39\xca\x4d\x43", .c_size = 102, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha384) */ "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f" @@ -934,16 +1624,11 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89" "\x9e", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf" "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1" "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff", .m_size = 48, - .algo = OID_id_ecdsa_with_sha384, .c = "\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62" "\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb" @@ -954,7 +1639,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\xab\x8d\x4e\xde\xe6\x6d\x9b\x66", .c_size = 104, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha512) */ "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46" @@ -965,17 +1649,12 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac" "\xa3", .key_len = 97, - .params = - 
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1" "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71" "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc" "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e", .m_size = 64, - .algo = OID_id_ecdsa_with_sha512, .c = "\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02" "\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3" @@ -986,14 +1665,163 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x3c\x93\xff\x50\x5d", .c_size = 101, .public_key_vec = true, - .siggen_sigver_test = true, + }, +}; + +static const struct sig_testvec x962_ecdsa_nist_p521_tv_template[] = { + { + .key = /* secp521r1(sha224) */ + "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0" + "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7" + "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61" + "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb" + "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a" + "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76" + "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47" + "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f" + "\x3b\x83\x82\x2f\x14", + .key_len = 133, + .m = + "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4" + "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb", + .m_size = 28, + .c = + "\x30\x81\x86\x02\x41\x01\xd6\x43\xe7\xff\x42\xb2\xba\x74\x35\xf6" + "\xdc\x6d\x02\x7b\x22\xac\xe2\xef\x07\x92\xee\x60\x94\x06\xf8\x3f" + "\x59\x0f\x74\xf0\x3f\xd8\x18\xc6\x37\x8a\xcb\xa7\xd8\x7d\x98\x85" + "\x29\x88\xff\x0b\x94\x94\x6c\xa6\x9b\x89\x8b\x1e\xfd\x09\x46\x6b" + "\xc7\xaf\x7a\xb9\x19\x0a\x02\x41\x3a\x26\x0d\x55\xcd\x23\x1e\x7d" + "\xa0\x5e\xf9\x88\xf3\xd2\x32\x90\x57\x0f\xf8\x65\x97\x6b\x09\x4d" + "\x22\x26\x0b\x5f\x49\x32\x6b\x91\x99\x30\x90\x0f\x1c\x8f\x78\xd3" + "\x9f\x0e\x64\xcc\xc4\xe8\x43\xd9\x0e\x1c\xad\x22\xda\x82\x00\x35" + "\xa3\x50\xb1\xa5\x98\x92\x2a\xa5\x52", + .c_size = 137, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha256) */ + "\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae" + "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14" + "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36" + "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81" + "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c" + "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09" + "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05" + "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3" + "\xe6\x77\x1e\x1f\x8a", + .key_len = 133, + .m = + "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39" + "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73", + .m_size = 32, + .c = + "\x30\x81\x88\x02\x42\x00\xcd\xa5\x5f\x57\x52\x27\x78\x3a\xb5\x06" + "\x0f\xfd\x83\xfc\x0e\xd9\xce\x50\x9f\x7d\x1f\xca\x8b\xa8\x2d\x56" + "\x3c\xf6\xf0\xd8\xe1\xb7\x5d\x95\x35\x6f\x02\x0e\xaf\xe1\x4c\xae" + "\xce\x54\x76\x9a\xc2\x8f\xb8\x38\x1f\x46\x0b\x04\x64\x34\x79\xde" + "\x7e\xd7\x59\x10\xe9\xd9\xd5\x02\x42\x01\xcf\x50\x85\x38\xf9\x15" + "\x83\x18\x04\x6b\x35\xae\x65\xb5\x99\x12\x0a\xa9\x79\x24\xb9\x37" + "\x35\xdd\xa0\xe0\x87\x2c\x44\x4b\x5a\xee\xaf\xfa\x10\xdd\x9b\xfb" + 
"\x36\x1a\x31\x03\x42\x02\x5f\x50\xf0\xa2\x0d\x1c\x57\x56\x8f\x12" + "\xb7\x1d\x91\x55\x38\xb6\xf6\x34\x65\xc7\xbd", + .c_size = 139, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha384) */ + "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b" + "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33" + "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4" + "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c" + "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47" + "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14" + "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46" + "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39" + "\xb3\x3b\x5b\x1b\x94", + .key_len = 133, + .m = + "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26" + "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59" + "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76", + .m_size = 48, + .c = + "\x30\x81\x88\x02\x42\x00\x93\x96\x76\x3c\x27\xea\xaa\x9c\x26\xec" + "\x51\xdc\xe8\x35\x5e\xae\x16\xf2\x4b\x64\x98\xf7\xec\xda\xc7\x7e" + "\x42\x71\x86\x57\x2d\xf1\x7d\xe4\xdf\x9b\x7d\x9e\x47\xca\x33\x32" + "\x76\x06\xd0\xf9\xc0\xe4\xe6\x84\x59\xfd\x1a\xc4\x40\xdd\x43\xb8" + "\x6a\xdd\xfb\xe6\x63\x4e\x28\x02\x42\x00\xff\xc3\x6a\x87\x6e\xb5" + "\x13\x1f\x20\x55\xce\x37\x97\xc9\x05\x51\xe5\xe4\x3c\xbc\x93\x65" + "\x57\x1c\x30\xda\xa7\xcd\x26\x28\x76\x3b\x52\xdf\xc4\xc0\xdb\x54" + "\xdb\x8a\x0d\x6a\xc3\xf3\x7a\xd1\xfa\xe7\xa7\xe5\x5a\x94\x56\xcf" + "\x8f\xb4\x22\xc6\x4f\xab\x2b\x62\xc1\x42\xb1", + .c_size = 139, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha512) */ + "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f" + "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a" + "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f" + "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee" + "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e" + "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6" + "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77" + "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae" + "\xd0\x17\xdf\x49\x6a", + .key_len = 133, + .m = + "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69" + "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed" + "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3" + "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68", + .m_size = 64, + .c = + "\x30\x81\x88\x02\x42\x01\x5c\x71\x86\x96\xac\x21\x33\x7e\x4e\xaa" + "\x86\xec\xa8\x05\x03\x52\x56\x63\x0e\x02\xcc\x94\xa9\x05\xb9\xfb" + "\x62\x1e\x42\x03\x6c\x74\x8a\x1f\x12\x3e\xb7\x7e\x51\xff\x7f\x27" + "\x93\xe8\x6c\x49\x7d\x28\xfc\x80\xa6\x13\xfc\xb6\x90\xf7\xbb\x28" + "\xb5\x04\xb0\xb6\x33\x1c\x7e\x02\x42\x01\x70\x43\x52\x1d\xe3\xc6" + "\xbd\x5a\x40\x95\x35\x89\x4f\x41\x5f\x9e\x19\x88\x05\x3e\x43\x39" + "\x01\xbd\xb7\x7a\x76\x37\x51\x47\x49\x98\x12\x71\xd0\xe9\xca\xa7" + "\xc0\xcb\xaa\x00\x55\xbb\x6a\xb4\x73\x00\xd2\x72\x74\x13\x63\x39" + "\xa6\xe5\x25\x46\x1e\x77\x44\x78\xe0\xd1\x04", + .c_size = 139, + .public_key_vec = true, + }, +}; + +/* + * ECDSA P1363 test vectors. + * + * Identical to ECDSA test vectors, except signature in "c" is P1363 encoded. 
+ */ +static const struct sig_testvec p1363_ecdsa_nist_p256_tv_template[] = { + { + .key = /* secp256r1(sha256) */ + "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" + "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" + "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" + "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" + "\xb8", + .key_len = 65, + .m = + "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" + "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", + .m_size = 32, + .c = + "\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63\xa8\x1a\xad\xfc" + "\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67\xdc\x0c\xd0\x82" + "\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9\xa9\x81\xac\x4a\x50\xd0\x91" + "\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f\x2a\x65\x35\x23\xe3\x1d\xfa", + .c_size = 64, + .public_key_vec = true, }, }; /* * EC-RDSA test vectors are generated by gost-engine. */ -static const struct akcipher_testvec ecrdsa_tv_template[] = { +static const struct sig_testvec ecrdsa_tv_template[] = { { .key = "\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05" @@ -1018,7 +1846,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9", .m_size = 32, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1044,7 +1871,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40", .m_size = 32, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1070,7 +1896,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39", .m_size = 32, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1105,7 +1930,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f", .m_size = 64, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1140,17 +1964,71 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc", .m_size = 64, .public_key_vec = true, - .siggen_sigver_test = true, + }, +}; + +/* + * PKCS#1 RSA test vectors for hash algorithm "none" + * (i.e. 
the hash in "m" is not prepended by a Full Hash Prefix) + * + * Obtained from: + * https://vcsjones.dev/sometimes-valid-rsa-dotnet/ + * https://gist.github.com/vcsjones/ab4c2327b53ed018eada76b75ef4fd99 + */ +static const struct sig_testvec pkcs1_rsa_none_tv_template[] = { + { + .key = + "\x30\x82\x01\x0a\x02\x82\x01\x01\x00\xa2\x63\x0b\x39\x44\xb8\xbb" + "\x23\xa7\x44\x49\xbb\x0e\xff\xa1\xf0\x61\x0a\x53\x93\xb0\x98\xdb" + "\xad\x2c\x0f\x4a\xc5\x6e\xff\x86\x3c\x53\x55\x0f\x15\xce\x04\x3f" + "\x2b\xfd\xa9\x96\x96\xd9\xbe\x61\x79\x0b\x5b\xc9\x4c\x86\x76\xe5" + "\xe0\x43\x4b\x22\x95\xee\xc2\x2b\x43\xc1\x9f\xd8\x68\xb4\x8e\x40" + "\x4f\xee\x85\x38\xb9\x11\xc5\x23\xf2\x64\x58\xf0\x15\x32\x6f\x4e" + "\x57\xa1\xae\x88\xa4\x02\xd7\x2a\x1e\xcd\x4b\xe1\xdd\x63\xd5\x17" + "\x89\x32\x5b\xb0\x5e\x99\x5a\xa8\x9d\x28\x50\x0e\x17\xee\x96\xdb" + "\x61\x3b\x45\x51\x1d\xcf\x12\x56\x0b\x92\x47\xfc\xab\xae\xf6\x66" + "\x3d\x47\xac\x70\x72\xe7\x92\xe7\x5f\xcd\x10\xb9\xc4\x83\x64\x94" + "\x19\xbd\x25\x80\xe1\xe8\xd2\x22\xa5\xd0\xba\x02\x7a\xa1\x77\x93" + "\x5b\x65\xc3\xee\x17\x74\xbc\x41\x86\x2a\xdc\x08\x4c\x8c\x92\x8c" + "\x91\x2d\x9e\x77\x44\x1f\x68\xd6\xa8\x74\x77\xdb\x0e\x5b\x32\x8b" + "\x56\x8b\x33\xbd\xd9\x63\xc8\x49\x9d\x3a\xc5\xc5\xea\x33\x0b\xd2" + "\xf1\xa3\x1b\xf4\x8b\xbe\xd9\xb3\x57\x8b\x3b\xde\x04\xa7\x7a\x22" + "\xb2\x24\xae\x2e\xc7\x70\xc5\xbe\x4e\x83\x26\x08\xfb\x0b\xbd\xa9" + "\x4f\x99\x08\xe1\x10\x28\x72\xaa\xcd\x02\x03\x01\x00\x01", + .key_len = 270, + .m = + "\x68\xb4\xf9\x26\x34\x31\x25\xdd\x26\x50\x13\x68\xc1\x99\x26\x71" + "\x19\xa2\xde\x81", + .m_size = 20, + .c = + "\x6a\xdb\x39\xe5\x63\xb3\x25\xde\x58\xca\xc3\xf1\x36\x9c\x0b\x36" + "\xb7\xd6\x69\xf9\xba\xa6\x68\x14\x8c\x24\x52\xd3\x25\xa5\xf3\xad" + "\xc9\x47\x44\xde\x06\xd8\x0f\x56\xca\x2d\xfb\x0f\xe9\x99\xe2\x9d" + "\x8a\xe8\x7f\xfb\x9a\x99\x96\xf1\x2c\x4a\xe4\xc0\xae\x4d\x29\x47" + "\x38\x96\x51\x2f\x6d\x8e\xb8\x88\xbd\x1a\x0a\x70\xbc\x23\x38\x67" + "\x62\x22\x01\x23\x71\xe5\xbb\x95\xea\x6b\x8d\x31\x62\xbf\xf0\xc4" + "\xb9\x46\xd6\x67\xfc\x4c\xe6\x1f\xd6\x5d\xf7\xa9\xad\x3a\xf1\xbf" + "\xa2\xf9\x66\xde\xb6\x8e\xec\x8f\x81\x8d\x1e\x3a\x12\x27\x6a\xfc" + "\xae\x92\x9f\xc3\x87\xc3\xba\x8d\x04\xb8\x8f\x0f\x61\x68\x9a\x96" + "\x2c\x80\x2c\x32\x40\xde\x9d\xb9\x9b\xe2\xe4\x45\x2e\x91\x47\x5c" + "\x47\xa4\x9d\x02\x57\x59\xf7\x75\x5d\x5f\x32\x82\x75\x5d\xe5\x78" + "\xc9\x19\x61\x46\x06\x9d\xa5\x1d\xd6\x32\x48\x9a\xdb\x09\x29\x81" + "\x14\x2e\xf0\x27\xe9\x37\x13\x74\xec\xa5\xcd\x67\x6b\x19\xf6\x88" + "\xf0\xc2\x8b\xa8\x7f\x2f\x76\x5a\x3e\x0c\x47\x5d\xe8\x82\x50\x27" + "\x40\xce\x27\x41\x45\xa0\xcf\xaa\x2f\xd3\xad\x3c\xbf\x73\xff\x93" + "\xe3\x78\x49\xd9\xa9\x78\x22\x81\x9a\xe5\xe2\x94\xe9\x40\xab\xf1", + .c_size = 256, + .public_key_vec = true, }, }; /* * PKCS#1 RSA test vectors. Obtained from CAVS testing. 
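 *
 * As a rough reminder of the scheme under test: an EMSA-PKCS1-v1_5
 * block over a k-byte modulus has the shape
 *
 *   0x00 0x01 0xff ... 0xff 0x00 || T
 *
 * where T is the DER-encoded DigestInfo wrapping the hash; the "none"
 * vectors above skip the DigestInfo, so T is the bare hash from "m".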
*/ -static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { +static const struct sig_testvec pkcs1_rsa_tv_template[] = { { .key = - "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" + "\x30\x82\x04\xa5\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28" "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67" "\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d" @@ -1166,42 +2044,66 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { "\x9e\x49\x63\x6e\x02\xc1\xc9\x3a\x9b\xa5\x22\x1b\x07\x95\xd6\x10" "\x02\x50\xfd\xfd\xd1\x9b\xbe\xab\xc2\xc0\x74\xd7\xec\x00\xfb\x11" "\x71\xcb\x7a\xdc\x81\x79\x9f\x86\x68\x46\x63\x82\x4d\xb7\xf1\xe6" - "\x16\x6f\x42\x63\xf4\x94\xa0\xca\x33\xcc\x75\x13\x02\x82\x01\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01" - "\x02\x82\x01\x00\x62\xb5\x60\x31\x4f\x3f\x66\x16\xc1\x60\xac\x47" - "\x2a\xff\x6b\x69\x00\x4a\xb2\x5c\xe1\x50\xb9\x18\x74\xa8\xe4\xdc" - "\xa8\xec\xcd\x30\xbb\xc1\xc6\xe3\xc6\xac\x20\x2a\x3e\x5e\x8b\x12" - "\xe6\x82\x08\x09\x38\x0b\xab\x7c\xb3\xcc\x9c\xce\x97\x67\xdd\xef" - "\x95\x40\x4e\x92\xe2\x44\xe9\x1d\xc1\x14\xfd\xa9\xb1\xdc\x71\x9c" - "\x46\x21\xbd\x58\x88\x6e\x22\x15\x56\xc1\xef\xe0\xc9\x8d\xe5\x80" - "\x3e\xda\x7e\x93\x0f\x52\xf6\xf5\xc1\x91\x90\x9e\x42\x49\x4f\x8d" - "\x9c\xba\x38\x83\xe9\x33\xc2\x50\x4f\xec\xc2\xf0\xa8\xb7\x6e\x28" - "\x25\x56\x6b\x62\x67\xfe\x08\xf1\x56\xe5\x6f\x0e\x99\xf1\xe5\x95" - "\x7b\xef\xeb\x0a\x2c\x92\x97\x57\x23\x33\x36\x07\xdd\xfb\xae\xf1" - "\xb1\xd8\x33\xb7\x96\x71\x42\x36\xc5\xa4\xa9\x19\x4b\x1b\x52\x4c" - "\x50\x69\x91\xf0\x0e\xfa\x80\x37\x4b\xb5\xd0\x2f\xb7\x44\x0d\xd4" - "\xf8\x39\x8d\xab\x71\x67\x59\x05\x88\x3d\xeb\x48\x48\x33\x88\x4e" - "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a" - "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda" - "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46" - "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00" - "\x02\x01\x00", - .key_len = 804, + "\x16\x6f\x42\x63\xf4\x94\xa0\xca\x33\xcc\x75\x13\x02\x03\x01\x00" + "\x01\x02\x82\x01\x00\x62\xb5\x60\x31\x4f\x3f\x66\x16\xc1\x60\xac" + "\x47\x2a\xff\x6b\x69\x00\x4a\xb2\x5c\xe1\x50\xb9\x18\x74\xa8\xe4" + "\xdc\xa8\xec\xcd\x30\xbb\xc1\xc6\xe3\xc6\xac\x20\x2a\x3e\x5e\x8b" + "\x12\xe6\x82\x08\x09\x38\x0b\xab\x7c\xb3\xcc\x9c\xce\x97\x67\xdd" + 
"\xef\x95\x40\x4e\x92\xe2\x44\xe9\x1d\xc1\x14\xfd\xa9\xb1\xdc\x71" + "\x9c\x46\x21\xbd\x58\x88\x6e\x22\x15\x56\xc1\xef\xe0\xc9\x8d\xe5" + "\x80\x3e\xda\x7e\x93\x0f\x52\xf6\xf5\xc1\x91\x90\x9e\x42\x49\x4f" + "\x8d\x9c\xba\x38\x83\xe9\x33\xc2\x50\x4f\xec\xc2\xf0\xa8\xb7\x6e" + "\x28\x25\x56\x6b\x62\x67\xfe\x08\xf1\x56\xe5\x6f\x0e\x99\xf1\xe5" + "\x95\x7b\xef\xeb\x0a\x2c\x92\x97\x57\x23\x33\x36\x07\xdd\xfb\xae" + "\xf1\xb1\xd8\x33\xb7\x96\x71\x42\x36\xc5\xa4\xa9\x19\x4b\x1b\x52" + "\x4c\x50\x69\x91\xf0\x0e\xfa\x80\x37\x4b\xb5\xd0\x2f\xb7\x44\x0d" + "\xd4\xf8\x39\x8d\xab\x71\x67\x59\x05\x88\x3d\xeb\x48\x48\x33\x88" + "\x4e\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9" + "\x7a\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f" + "\xda\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d" + "\x46\xb8\x35\xdf\x41\x02\x81\x81\x00\xe4\x4c\xae\xde\x16\xfd\x9f" + "\x83\x55\x5b\x84\x4a\xcf\x1c\xf1\x37\x95\xad\xca\x29\x7f\x2d\x6e" + "\x32\x81\xa4\x2b\x26\x14\x96\x1d\x40\x05\xec\x0c\xaf\x3f\x2c\x6f" + "\x2c\xe8\xbf\x1d\xee\xd0\xb3\xef\x7c\x5b\x9e\x88\x4f\x2a\x8b\x0e" + "\x4a\xbd\xb7\x8c\xfa\x10\x0e\x3b\xda\x68\xad\x41\x2b\xe4\x96\xfa" + "\x7f\x80\x52\x5f\x07\x9f\x0e\x3b\x5e\x96\x45\x1a\x13\x2b\x94\xce" + "\x1f\x07\x69\x85\x35\xfc\x69\x63\x5b\xf8\xf8\x3f\xce\x9d\x40\x1e" + "\x7c\xad\xfb\x9e\xce\xe0\x01\xf8\xef\x59\x5d\xdc\x00\x79\xab\x8a" + "\x3f\x80\xa2\x76\x32\x94\xa9\xea\x65\x02\x81\x81\x00\xf1\x38\x60" + "\x90\x0d\x0c\x2e\x3d\x34\xe5\x90\xea\x21\x43\x1f\x68\x63\x16\x7b" + "\x25\x8d\xde\x82\x2b\x52\xf8\xa3\xfd\x0f\x39\xe7\xe9\x5e\x32\x75" + "\x15\x7d\xd0\xc9\xce\x06\xe5\xfb\xa9\xcb\x22\xe5\xdb\x49\x09\xf2" + "\xe6\xb7\xa5\xa7\x75\x2e\x91\x2d\x2b\x5d\xf1\x48\x61\x45\x43\xd7" + "\xbd\xfc\x11\x73\xb5\x11\x9f\xb2\x18\x3a\x6f\x36\xa7\xc2\xd3\x18" + "\x4d\xf0\xc5\x1f\x70\x8c\x9b\xc5\x1d\x95\xa8\x5a\x9e\x8c\xb1\x4b" + "\x6a\x2a\x84\x76\x2c\xd8\x4f\x47\xb0\x81\x84\x02\x45\xf0\x85\xf8" + "\x0c\x6d\xa7\x0c\x4d\x2c\xb2\x5b\x81\x70\xfd\x6e\x17\x02\x81\x81" + "\x00\x8d\x07\xc5\xfa\x92\x4f\x48\xcb\xd3\xdd\xfe\x02\x4c\xa1\x7f" + "\x6d\xab\xfc\x38\xe7\x9b\x95\xcf\xfe\x49\x51\xc6\x09\xf7\x2b\xa8" + "\x94\x15\x54\x75\x9d\x88\xb4\x05\x55\xc3\xcd\xd4\x4a\xe4\x08\x53" + "\xc8\x09\xbd\x0c\x4d\x83\x65\x75\x85\xbc\x5e\xf8\x2a\xbd\xe2\x5d" + "\x1d\x16\x0e\xf9\x34\x89\x38\xaf\x34\x36\x6c\x2c\x22\x44\x22\x81" + "\x90\x73\xd9\xea\x3a\xaf\x70\x74\x48\x7c\xc6\xb5\xb0\xdc\xe5\xa9" + "\xa8\x76\x4b\xbc\xf7\x00\xf3\x4c\x22\x0f\x44\x62\x1d\x40\x0a\x57" + "\xe2\x5b\xdd\x7c\x7b\x9a\xad\xda\x70\x52\x21\x8a\x4c\xc2\xc3\x98" + "\x75\x02\x81\x81\x00\xed\x24\x5c\xa2\x21\x81\xa1\x0f\xa1\x2a\x33" + "\x0e\x49\xc7\x00\x60\x92\x51\x6e\x9d\x9b\xdc\x6d\x22\x04\x7e\xd6" + "\x51\x19\x9f\xf6\xe3\x91\x2c\x8f\xb8\xa2\x29\x19\xcc\x47\x31\xdf" + "\xf8\xab\xf0\xd2\x02\x83\xca\x99\x16\xc2\xe2\xc3\x3f\x4b\x99\x83" + "\xcb\x87\x9e\x86\x66\xc2\x3e\x91\x21\x80\x66\xf3\xd6\xc5\xcd\xb6" + "\xbb\x64\xef\x22\xcf\x48\x94\x58\xe7\x7e\xd5\x7c\x34\x1c\xb7\xa2" + "\xd0\x93\xe9\x9f\xb5\x11\x61\xd7\x5f\x37\x0f\x64\x52\x70\x11\x78" + "\xcc\x08\x77\xeb\xf8\x30\x1e\xb4\x9e\x1b\x4a\xc7\xa8\x33\x51\xe0" + "\xed\xdf\x53\xf6\xdf\x02\x81\x81\x00\x86\xd9\x4c\xee\x65\x61\xc1" + "\x19\xa9\xd5\x74\x9b\xd5\xca\xf6\x83\x2b\x06\xb4\x20\xfe\x45\x29" + "\xe8\xe3\xfa\xe1\x4f\x28\x8e\x63\x2f\x74\xc3\x3a\x5c\x9a\xf5\x9e" + "\x0e\x0d\xc5\xfe\xa0\x4c\x00\xce\x7b\xa4\x19\x17\x59\xaf\x13\x3a" + "\x03\x8f\x54\xf5\x60\x39\x2e\xd9\x06\xb3\x7c\xd6\x90\x06\x41\x77" + "\xf3\x93\xe1\x7a\x01\x41\xc1\x8f\xfe\x4c\x88\x39\xdb\xde\x71\x9e" + 
"\x58\xd1\x49\x50\x80\xb2\x5a\x4f\x69\x8b\xb8\xfe\x63\xd4\x42\x3d" + "\x37\x61\xa8\x4c\xff\xb6\x99\x4c\xf4\x51\xe0\x44\xaa\x69\x79\x3f" + "\x81\xa4\x61\x3d\x26\xe9\x04\x52\x64", + .key_len = 1193, /* * m is SHA256 hash of following message: * "\x49\x41\xbe\x0a\x0c\xc9\xf6\x35\x51\xe4\x27\x56\x13\x71\x4b\xd0" @@ -1235,7 +2137,6 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { "\xda\x62\x8d\xe1\x2a\x71\x91\x43\x40\x61\x3c\x5a\xbe\x86\xfc\x5b" "\xe6\xf9\xa9\x16\x31\x1f\xaf\x25\x6d\xc2\x4a\x23\x6e\x63\x02\xa2", .c_size = 256, - .siggen_sigver_test = true, } }; @@ -1244,17 +2145,15 @@ static const struct kpp_testvec dh_tv_template[] = { .secret = #ifdef __LITTLE_ENDIAN "\x01\x00" /* type */ - "\x15\x02" /* len */ + "\x11\x02" /* len */ "\x00\x01\x00\x00" /* key_size */ "\x00\x01\x00\x00" /* p_size */ - "\x00\x00\x00\x00" /* q_size */ "\x01\x00\x00\x00" /* g_size */ #else "\x00\x01" /* type */ - "\x02\x15" /* len */ + "\x02\x11" /* len */ "\x00\x00\x01\x00" /* key_size */ "\x00\x00\x01\x00" /* p_size */ - "\x00\x00\x00\x00" /* q_size */ "\x00\x00\x00\x01" /* g_size */ #endif /* xa */ @@ -1344,7 +2243,7 @@ static const struct kpp_testvec dh_tv_template[] = { "\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11" "\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50" "\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17", - .secret_size = 533, + .secret_size = 529, .b_public_size = 256, .expected_a_public_size = 256, .expected_ss_size = 256, @@ -1353,17 +2252,15 @@ static const struct kpp_testvec dh_tv_template[] = { .secret = #ifdef __LITTLE_ENDIAN "\x01\x00" /* type */ - "\x15\x02" /* len */ + "\x11\x02" /* len */ "\x00\x01\x00\x00" /* key_size */ "\x00\x01\x00\x00" /* p_size */ - "\x00\x00\x00\x00" /* q_size */ "\x01\x00\x00\x00" /* g_size */ #else "\x00\x01" /* type */ - "\x02\x15" /* len */ + "\x02\x11" /* len */ "\x00\x00\x01\x00" /* key_size */ "\x00\x00\x01\x00" /* p_size */ - "\x00\x00\x00\x00" /* q_size */ "\x00\x00\x00\x01" /* g_size */ #endif /* xa */ @@ -1453,1236 +2350,1440 @@ static const struct kpp_testvec dh_tv_template[] = { "\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a" "\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee" "\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd", - .secret_size = 533, + .secret_size = 529, .b_public_size = 256, .expected_a_public_size = 256, .expected_ss_size = 256, } }; -static const struct kpp_testvec curve25519_tv_template[] = { -{ - .secret = (u8[32]){ 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, - 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, - 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, - 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a }, - .b_public = (u8[32]){ 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, - 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, - 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, - 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f }, - .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, - 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, - 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, - 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -{ - .secret = (u8[32]){ 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, - 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, - 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, - 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb }, - .b_public = (u8[32]){ 0x85, 0x20, 0xf0, 0x09, 0x89, 
0x30, 0xa7, 0x54, - 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, - 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, - 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a }, - .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, - 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, - 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, - 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -{ - .secret = (u8[32]){ 1 }, - .b_public = (u8[32]){ 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .expected_ss = (u8[32]){ 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, - 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, - 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98, - 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -{ - .secret = (u8[32]){ 1 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f, - 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, - 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3, - 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -{ - .secret = (u8[32]){ 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, - 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, - 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, - 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 }, - .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, - 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, - 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, - 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, - .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, - 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, - 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, - 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -{ - .secret = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f }, - .expected_ss = (u8[32]){ 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2, - 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57, - 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05, - 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -{ - .secret = (u8[32]){ 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 }, - .expected_ss 
= (u8[32]){ 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d, - 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12, - 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99, - 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - normal case */ -{ - .secret = (u8[32]){ 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda, - 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66, - 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3, - 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba }, - .b_public = (u8[32]){ 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5, - 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9, - 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e, - 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a }, - .expected_ss = (u8[32]){ 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5, - 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38, - 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e, - 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key on twist */ -{ - .secret = (u8[32]){ 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4, - 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5, - 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49, - 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 }, - .b_public = (u8[32]){ 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5, - 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8, - 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3, - 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 }, - .expected_ss = (u8[32]){ 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff, - 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d, - 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe, - 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key on twist */ -{ - .secret = (u8[32]){ 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9, - 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39, - 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5, - 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 }, - .b_public = (u8[32]){ 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f, - 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b, - 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c, - 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 }, - .expected_ss = (u8[32]){ 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53, - 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57, - 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0, - 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key on twist */ -{ - .secret = (u8[32]){ 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc, - 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d, - 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67, - 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c }, - .b_public = (u8[32]){ 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97, - 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f, - 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45, - 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a }, - .expected_ss = (u8[32]){ 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93, - 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2, - 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44, - 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key on twist */ -{ - .secret = (u8[32]){ 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 
0xb1, - 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95, - 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99, - 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d }, - .b_public = (u8[32]){ 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27, - 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07, - 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae, - 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c }, - .expected_ss = (u8[32]){ 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73, - 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2, - 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f, - 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key on twist */ -{ - .secret = (u8[32]){ 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9, - 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd, - 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b, - 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 }, - .b_public = (u8[32]){ 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5, - 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52, - 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8, - 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 }, - .expected_ss = (u8[32]){ 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86, - 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4, - 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6, - 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case on twist */ -{ - .secret = (u8[32]){ 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04, - 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77, - 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90, - 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 }, - .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .expected_ss = (u8[32]){ 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97, - 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9, - 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7, - 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case on twist */ -{ - .secret = (u8[32]){ 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36, - 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd, - 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c, - 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 }, - .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .expected_ss = (u8[32]){ 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e, - 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b, - 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e, - 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case on twist */ -{ - .secret = (u8[32]){ 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed, - 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e, - 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd, - 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff, - 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00, - 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 }, - 
.expected_ss = (u8[32]){ 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f, - 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1, - 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10, - 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case on twist */ -{ - .secret = (u8[32]){ 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3, - 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d, - 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00, - 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 }, - .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00, - 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff, - 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8, - 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4, - 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70, - 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case on twist */ -{ - .secret = (u8[32]){ 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3, - 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a, - 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e, - 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 }, - .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57, - 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c, - 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59, - 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case on twist */ -{ - .secret = (u8[32]){ 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f, - 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42, - 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9, - 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 }, - .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c, - 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5, - 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65, - 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6, - 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4, - 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8, - 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe }, - .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .expected_ss = (u8[32]){ 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7, - 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca, - 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f, - 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0xe0, 0x23, 
0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa, - 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3, - 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52, - 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }, - .expected_ss = (u8[32]){ 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3, - 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e, - 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75, - 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26, - 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea, - 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00, - 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, - .expected_ss = (u8[32]){ 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8, - 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32, - 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87, - 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c, - 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6, - 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb, - 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, - 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff, - 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f }, - .expected_ss = (u8[32]){ 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85, - 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f, - 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0, - 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38, - 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b, - 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c, - 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, - .expected_ss = (u8[32]){ 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b, - 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81, - 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3, - 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d, - 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42, - 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98, - 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, - 
0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c, - 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9, - 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89, - 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for public key */ -{ - .secret = (u8[32]){ 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29, - 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6, - 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c, - 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f }, - .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75, - 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89, - 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c, - 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc, - 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1, - 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d, - 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae }, - .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09, - 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde, - 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1, - 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81, - 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a, - 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99, - 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d }, - .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17, - 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35, - 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55, - 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11, - 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b, - 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9, - 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 }, - .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53, - 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e, - 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6, - 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - 
.secret = (u8[32]){ 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78, - 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2, - 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd, - 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .expected_ss = (u8[32]){ 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb, - 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40, - 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2, - 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9, - 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60, - 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13, - 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 }, - .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, - .expected_ss = (u8[32]){ 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c, - 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3, - 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65, - 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a, - 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7, - 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11, - 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e }, - .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, - .expected_ss = (u8[32]){ 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82, - 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4, - 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c, - 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e, - 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a, - 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d, - 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f }, - .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, - .expected_ss = (u8[32]){ 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2, - 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60, - 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25, - 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb, - 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97, - 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c, - 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 }, - .b_public = (u8[32]){ 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23, - 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8, - 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69, - 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a, - 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23, - 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b, - 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 }, - .b_public = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b, - 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44, - 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37, - 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80, - 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d, - 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b, - 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 }, - .b_public = (u8[32]){ 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63, - 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae, - 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f, - 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0, - 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd, - 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49, - 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 }, - .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41, - 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0, - 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf, - 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9, - 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa, - 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5, - 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e }, - .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47, - 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3, - 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b, - 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = 
(u8[32]){ 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8, - 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98, - 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0, - 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 }, - .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0, - 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1, - 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a, - 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02, - 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4, - 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68, - 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d }, - .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f, - 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2, - 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95, - 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7, - 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06, - 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9, - 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 }, - .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5, - 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0, - 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80, - 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - public key >= p */ -{ - .secret = (u8[32]){ 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd, - 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4, - 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04, - 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 }, - .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - .expected_ss = (u8[32]){ 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0, - 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac, - 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48, - 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - RFC 7748 */ -{ - .secret = (u8[32]){ 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, - 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, - 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, - 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 }, - .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, - 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, - 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, - 0x10, 0xa9, 0x03, 0xa6, 
0xd0, 0xab, 0x1c, 0x4c }, - .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, - 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, - 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, - 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - RFC 7748 */ -{ - .secret = (u8[32]){ 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, - 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5, - 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, - 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d }, - .b_public = (u8[32]){ 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, - 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c, - 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, - 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 }, - .expected_ss = (u8[32]){ 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, - 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, - 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, - 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde, - 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8, - 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4, - 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 }, - .expected_ss = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d, - 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64, - 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd, - 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 }, - .expected_ss = (u8[32]){ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8, - 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf, - 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94, - 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d }, - .expected_ss = (u8[32]){ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret 
*/ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84, - 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62, - 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e, - 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 }, - .expected_ss = (u8[32]){ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8, - 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58, - 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02, - 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 }, - .expected_ss = (u8[32]){ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9, - 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a, - 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44, - 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b }, - .expected_ss = (u8[32]){ 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd, - 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22, - 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56, - 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b }, - .expected_ss = (u8[32]){ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53, - 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f, - 
0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18, - 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f }, - .expected_ss = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55, - 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b, - 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79, - 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f }, - .expected_ss = (u8[32]){ 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39, - 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c, - 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb, - 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e }, - .expected_ss = (u8[32]){ 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04, - 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10, - 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58, - 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c }, - .expected_ss = (u8[32]){ 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3, - 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c, - 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88, - 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 }, - .expected_ss = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, - .secret_size = 32, - 
.b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a, - 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49, - 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a, - 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f }, - .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - edge case for shared secret */ -{ - .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, - 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, - 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, - 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, - .b_public = (u8[32]){ 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca, - 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c, - 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb, - 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 }, - .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - checking for overflow */ -{ - .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, - 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, - 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, - 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, - .b_public = (u8[32]){ 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58, - 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7, - 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01, - 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d }, - .expected_ss = (u8[32]){ 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d, - 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27, - 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b, - 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - checking for overflow */ -{ - .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, - 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, - 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, - 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, - .b_public = (u8[32]){ 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26, - 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2, - 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44, - 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e }, - .expected_ss = (u8[32]){ 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6, - 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d, - 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e, - 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - checking for overflow */ -{ - .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, - 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, - 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, - 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, - .b_public = (u8[32]){ 0x64, 0xae, 0xac, 
0x25, 0x04, 0x14, 0x48, 0x61, - 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67, - 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e, - 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c }, - .expected_ss = (u8[32]){ 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65, - 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce, - 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0, - 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, - -}, -/* wycheproof - checking for overflow */ -{ - .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, - 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, - 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, - 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, - .b_public = (u8[32]){ 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee, - 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d, - 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14, - 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 }, - .expected_ss = (u8[32]){ 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e, - 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc, - 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5, - 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, +static const struct kpp_testvec ffdhe2048_dh_tv_template[] __maybe_unused = { + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x01" /* len */ + "\x00\x01\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x01\x10" /* len */ + "\x00\x00\x01\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x23\x7d\xd0\x06\xfd\x7a\xe5\x7a\x08\xda\x98\x31\xc0\xb3\xd5\x85" + "\xe2\x0d\x2a\x91\x5f\x78\x4b\xa6\x62\xd0\xa6\x35\xd4\xef\x86\x39" + "\xf1\xdb\x71\x5e\xb0\x11\x2e\xee\x91\x3a\xaa\xf9\xe3\xdf\x8d\x8b" + "\x48\x41\xde\xe8\x78\x53\xc5\x5f\x93\xd2\x79\x0d\xbe\x8d\x83\xe8" + "\x8f\x00\xd2\xde\x13\x18\x04\x05\x20\x6d\xda\xfa\x1d\x0b\x24\x52" + "\x3a\x18\x2b\xe1\x1e\xae\x15\x3b\x0f\xaa\x09\x09\xf6\x01\x98\xe9" + "\x81\x5d\x6b\x83\x6e\x55\xf1\x5d\x6f\x6f\x0d\x9d\xa8\x72\x32\x63" + "\x60\xe6\x0b\xc5\x22\xe2\xf9\x46\x58\xa2\x1c\x2a\xb0\xd5\xaf\xe3" + "\x5b\x03\xb7\x36\xb7\xba\x55\x20\x08\x7c\x51\xd4\x89\x42\x9c\x14" + "\x23\xe2\x71\x3e\x15\x2a\x0d\x34\x8a\xde\xad\x84\x11\x15\x72\x18" + "\x42\x43\x0a\xe2\x58\x29\xb3\x90\x0f\x56\xd8\x8a\x0f\x0e\xbc\x0e" + "\x9c\xe7\xd5\xe6\x5b\xbf\x06\x64\x38\x12\xa5\x8d\x5b\x68\x34\xdd" + "\x75\x48\xc9\xa7\xa3\x58\x5a\x1c\xe1\xb2\xc5\xe3\x39\x03\xcf\xab" + "\xc2\x14\x07\xaf\x55\x80\xc7\x63\xe4\x03\xeb\xe9\x0a\x25\x61\x85" + "\x1d\x0e\x81\x52\x7b\xbc\x4a\x0c\xc8\x59\x6a\xac\x18\xfb\x8c\x0c" + "\xb4\x79\xbd\xa1\x4c\xbb\x02\xc9\xd5\x13\x88\x3d\x25\xaa\x77\x49", + .b_public = + "\x5c\x00\x6f\xda\xfe\x4c\x0c\xc2\x18\xff\xa9\xec\x7a\xbe\x8a\x51" + "\x64\x6b\x57\xf8\xed\xe2\x36\x77\xc1\x23\xbf\x56\xa6\x48\x76\x34" + "\x0e\xf3\x68\x05\x45\x6a\x98\x5b\x9e\x8b\xc0\x11\x29\xcb\x5b\x66" + "\x2d\xc2\xeb\x4c\xf1\x7d\x85\x30\xaa\xd5\xf5\xb8\xd3\x62\x1e\x97" + "\x1e\x34\x18\xf8\x76\x8c\x10\xca\x1f\xe4\x5d\x62\xe1\xbe\x61\xef" + "\xaf\x2c\x8d\x97\x15\xa5\x86\xd5\xd3\x12\x6f\xec\xe2\xa4\xb2\x5a" + "\x35\x1d\xd4\x91\xa6\xef\x13\x09\x65\x9c\x45\xc0\x12\xad\x7f\xee" + "\x93\x5d\xfa\x89\x26\x7d\xae\xee\xea\x8c\xa3\xcf\x04\x2d\xa0\xc7" + "\xd9\x14\x62\xaf\xdf\xa0\x33\xd7\x5e\x83\xa2\xe6\x0e\x0e\x5d\x77" + "\xce\xe6\x72\xe4\xec\x9d\xff\x72\x9f\x38\x95\x19\x96\xba\x4c\xe3" + 
"\x5f\xb8\x46\x4a\x1d\xe9\x62\x7b\xa8\xdc\xe7\x61\x90\x6b\xb9\xd4" + "\xad\x0b\xa3\x06\xb3\x70\xfa\xea\x2b\xc4\x2c\xde\x43\x37\xf6\x8d" + "\x72\xf0\x86\x9a\xbb\x3b\x8e\x7a\x71\x03\x30\x30\x2a\x5d\xcd\x1e" + "\xe4\xd3\x08\x07\x75\x17\x17\x72\x1e\x77\x6c\x98\x0d\x29\x7f\xac" + "\xe7\xb2\xee\xa9\x1c\x33\x9d\x08\x39\xe1\xd8\x5b\xe5\xbc\x48\xb2" + "\xb6\xdf\xcd\xa0\x42\x06\xcc\xfb\xed\x60\x6f\xbc\x57\xac\x09\x45", + .expected_a_public = + "\x8b\xdb\xc1\xf7\xc6\xba\xa1\x38\x95\x6a\xa1\xb6\x04\x5e\xae\x52" + "\x72\xfc\xef\x2d\x9d\x71\x05\x9c\xd3\x02\xa9\xfb\x55\x0f\xfa\xc9" + "\xb4\x34\x51\xa3\x28\x89\x8d\x93\x92\xcb\xd9\xb5\xb9\x66\xfc\x67" + "\x15\x92\x6f\x73\x85\x15\xe2\xfc\x11\x6b\x97\x8c\x4b\x0f\x12\xfa" + "\x8d\x72\x76\x9b\x8f\x3b\xfe\x31\xbe\x42\x88\x4c\xd2\xb2\x70\xa6" + "\xa5\xe3\x7e\x73\x07\x12\x36\xaa\xc9\x5c\x83\xe1\xf1\x46\x41\x4f" + "\x7c\x52\xaf\xdc\xa4\xe6\x82\xa3\x86\x83\x47\x5a\x12\x3a\x0c\xe3" + "\xdd\xdb\x94\x03\x2a\x59\x91\xa0\x19\xe5\xf8\x07\xdd\x54\x6a\x22" + "\x43\xb7\xf3\x74\xd7\xb9\x30\xfe\x9c\xe8\xd1\xcf\x06\x43\x68\xb9" + "\x54\x8f\x54\xa2\xe5\x3c\xf2\xc3\x4c\xee\xd4\x7c\x5d\x0e\xb1\x7b" + "\x16\x68\xb5\xb3\x7d\xd4\x11\x83\x5c\x77\x17\xc4\xf0\x59\x76\x7a" + "\x83\x40\xe5\xd9\x4c\x76\x23\x5b\x17\x6d\xee\x4a\x92\x68\x4b\x89" + "\xa0\x6d\x23\x8c\x80\x31\x33\x3a\x12\xf4\x50\xa6\xcb\x13\x97\x01" + "\xb8\x2c\xe6\xd2\x38\xdf\xd0\x7f\xc6\x27\x19\x0e\xb2\x07\xfd\x1f" + "\x1b\x9c\x1b\x87\xf9\x73\x6a\x3f\x7f\xb0\xf9\x2f\x3c\x19\x9f\xc9" + "\x8f\x97\x21\x0e\x8e\xbb\x1a\x17\x20\x15\xdd\xc6\x42\x60\xae\x4d", + .expected_ss = + "\xf3\x0e\x64\x7b\x66\xd7\x82\x7e\xab\x7e\x4a\xbe\x13\x6f\x43\x3d" + "\xea\x4f\x1f\x8b\x9d\x41\x56\x71\xe1\x06\x96\x02\x68\xfa\x44\x6e" + "\xe7\xf2\x26\xd4\x01\x4a\xf0\x28\x25\x76\xad\xd7\xe0\x17\x74\xfe" + "\xf9\xe1\x6d\xd3\xf7\xc7\xdf\xc0\x62\xa5\xf3\x4e\x1b\x5c\x77\x2a" + "\xfb\x0b\x87\xc3\xde\x1e\xc1\xe0\xd3\x7a\xb8\x02\x02\xec\x9c\x97" + "\xfb\x34\xa0\x20\x10\x23\x87\xb2\x9a\x72\xe3\x3d\xb2\x18\x50\xf3" + "\x6a\xd3\xd3\x19\xc4\x36\xd5\x59\xd6\xd6\xa7\x5c\xc3\xf9\x09\x33" + "\xa1\xf5\xb9\x4b\xf3\x0b\xe1\x4f\x79\x6b\x45\xf2\xec\x8b\xe5\x69" + "\x9f\xc6\x05\x01\xfe\x3a\x13\xfd\x6d\xea\x03\x83\x29\x7c\x7f\xf5" + "\x41\x55\x95\xde\x7e\x62\xae\xaf\x28\xdb\x7c\xa9\x90\x1e\xb2\xb1" + "\x1b\xef\xf1\x2e\xde\x47\xaa\xa8\x92\x9a\x49\x3d\xc0\xe0\x8d\xbb" + "\x0c\x42\x86\xaf\x00\xce\xb0\xab\x22\x7c\xe9\xbe\xb9\x72\x2f\xcf" + "\x5e\x5d\x62\x52\x2a\xd1\xfe\xcc\xa2\xf3\x40\xfd\x01\xa7\x54\x0a" + "\xa1\xfb\x1c\xf2\x44\xa6\x47\x30\x5a\xba\x2a\x05\xff\xd0\x6c\xab" + "\xeb\xe6\x8f\xf6\xd7\x73\xa3\x0e\x6c\x0e\xcf\xfd\x8e\x16\x5d\xe0" + "\x2c\x11\x05\x82\x3c\x22\x16\x6c\x52\x61\xcf\xbb\xff\xf8\x06\xd0", + .secret_size = 272, + .b_public_size = 256, + .expected_a_public_size = 256, + .expected_ss_size = 256, + }, + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x00" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#else + "\x00\x01" /* type */ + "\x00\x10" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#endif + .b_secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x01" /* len */ + "\x00\x01\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x01\x10" /* len */ + "\x00\x00\x01\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + 
"\x23\x7d\xd0\x06\xfd\x7a\xe5\x7a\x08\xda\x98\x31\xc0\xb3\xd5\x85" + "\xe2\x0d\x2a\x91\x5f\x78\x4b\xa6\x62\xd0\xa6\x35\xd4\xef\x86\x39" + "\xf1\xdb\x71\x5e\xb0\x11\x2e\xee\x91\x3a\xaa\xf9\xe3\xdf\x8d\x8b" + "\x48\x41\xde\xe8\x78\x53\xc5\x5f\x93\xd2\x79\x0d\xbe\x8d\x83\xe8" + "\x8f\x00\xd2\xde\x13\x18\x04\x05\x20\x6d\xda\xfa\x1d\x0b\x24\x52" + "\x3a\x18\x2b\xe1\x1e\xae\x15\x3b\x0f\xaa\x09\x09\xf6\x01\x98\xe9" + "\x81\x5d\x6b\x83\x6e\x55\xf1\x5d\x6f\x6f\x0d\x9d\xa8\x72\x32\x63" + "\x60\xe6\x0b\xc5\x22\xe2\xf9\x46\x58\xa2\x1c\x2a\xb0\xd5\xaf\xe3" + "\x5b\x03\xb7\x36\xb7\xba\x55\x20\x08\x7c\x51\xd4\x89\x42\x9c\x14" + "\x23\xe2\x71\x3e\x15\x2a\x0d\x34\x8a\xde\xad\x84\x11\x15\x72\x18" + "\x42\x43\x0a\xe2\x58\x29\xb3\x90\x0f\x56\xd8\x8a\x0f\x0e\xbc\x0e" + "\x9c\xe7\xd5\xe6\x5b\xbf\x06\x64\x38\x12\xa5\x8d\x5b\x68\x34\xdd" + "\x75\x48\xc9\xa7\xa3\x58\x5a\x1c\xe1\xb2\xc5\xe3\x39\x03\xcf\xab" + "\xc2\x14\x07\xaf\x55\x80\xc7\x63\xe4\x03\xeb\xe9\x0a\x25\x61\x85" + "\x1d\x0e\x81\x52\x7b\xbc\x4a\x0c\xc8\x59\x6a\xac\x18\xfb\x8c\x0c" + "\xb4\x79\xbd\xa1\x4c\xbb\x02\xc9\xd5\x13\x88\x3d\x25\xaa\x77\x49", + .b_public = + "\x8b\xdb\xc1\xf7\xc6\xba\xa1\x38\x95\x6a\xa1\xb6\x04\x5e\xae\x52" + "\x72\xfc\xef\x2d\x9d\x71\x05\x9c\xd3\x02\xa9\xfb\x55\x0f\xfa\xc9" + "\xb4\x34\x51\xa3\x28\x89\x8d\x93\x92\xcb\xd9\xb5\xb9\x66\xfc\x67" + "\x15\x92\x6f\x73\x85\x15\xe2\xfc\x11\x6b\x97\x8c\x4b\x0f\x12\xfa" + "\x8d\x72\x76\x9b\x8f\x3b\xfe\x31\xbe\x42\x88\x4c\xd2\xb2\x70\xa6" + "\xa5\xe3\x7e\x73\x07\x12\x36\xaa\xc9\x5c\x83\xe1\xf1\x46\x41\x4f" + "\x7c\x52\xaf\xdc\xa4\xe6\x82\xa3\x86\x83\x47\x5a\x12\x3a\x0c\xe3" + "\xdd\xdb\x94\x03\x2a\x59\x91\xa0\x19\xe5\xf8\x07\xdd\x54\x6a\x22" + "\x43\xb7\xf3\x74\xd7\xb9\x30\xfe\x9c\xe8\xd1\xcf\x06\x43\x68\xb9" + "\x54\x8f\x54\xa2\xe5\x3c\xf2\xc3\x4c\xee\xd4\x7c\x5d\x0e\xb1\x7b" + "\x16\x68\xb5\xb3\x7d\xd4\x11\x83\x5c\x77\x17\xc4\xf0\x59\x76\x7a" + "\x83\x40\xe5\xd9\x4c\x76\x23\x5b\x17\x6d\xee\x4a\x92\x68\x4b\x89" + "\xa0\x6d\x23\x8c\x80\x31\x33\x3a\x12\xf4\x50\xa6\xcb\x13\x97\x01" + "\xb8\x2c\xe6\xd2\x38\xdf\xd0\x7f\xc6\x27\x19\x0e\xb2\x07\xfd\x1f" + "\x1b\x9c\x1b\x87\xf9\x73\x6a\x3f\x7f\xb0\xf9\x2f\x3c\x19\x9f\xc9" + "\x8f\x97\x21\x0e\x8e\xbb\x1a\x17\x20\x15\xdd\xc6\x42\x60\xae\x4d", + .secret_size = 16, + .b_secret_size = 272, + .b_public_size = 256, + .expected_a_public_size = 256, + .expected_ss_size = 256, + .genkey = true, + }, +}; -}, -/* wycheproof - checking for overflow */ -{ - .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, - 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, - 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, - 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, - .b_public = (u8[32]){ 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4, - 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5, - 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c, - 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 }, - .expected_ss = (u8[32]){ 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b, - 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93, - 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f, - 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, +static const struct kpp_testvec ffdhe3072_dh_tv_template[] __maybe_unused = { + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x90\x01" /* len */ + "\x80\x01\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x01\x90" /* len */ + "\x00\x00\x01\x80" /* key_size */ + "\x00\x00\x00\x00" /* 
p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x6b\xb4\x97\x23\xfa\xc8\x5e\xa9\x7b\x63\xe7\x3e\x0e\x99\xc3\xb9" + "\xda\xb7\x48\x0d\xc3\xb1\xbf\x4f\x17\xc7\xa9\x51\xf6\x64\xff\xc4" + "\x31\x58\x87\x25\x83\x2c\x00\xf0\x41\x29\xf7\xee\xf9\xe6\x36\x76" + "\xd6\x3a\x24\xbe\xa7\x07\x0b\x93\xc7\x9f\x6c\x75\x0a\x26\x75\x76" + "\xe3\x0c\x42\xe0\x00\x04\x69\xd9\xec\x0b\x59\x54\x28\x8f\xd7\x9a" + "\x63\xf4\x5b\xdf\x85\x65\xc4\xe1\x95\x27\x4a\x42\xad\x36\x47\xa9" + "\x0a\xf8\x14\x1c\xf3\x94\x3b\x7e\x47\x99\x35\xa8\x18\xec\x70\x10" + "\xdf\xcb\xd2\x78\x88\xc1\x2d\x59\x93\xc1\xa4\x6d\xd7\x1d\xb9\xd5" + "\xf8\x30\x06\x7f\x98\x90\x0c\x74\x5e\x89\x2f\x64\x5a\xad\x5f\x53" + "\xb2\xa3\xa8\x83\xbf\xfc\x37\xef\xb8\x36\x0a\x5c\x62\x81\x64\x74" + "\x16\x2f\x45\x39\x2a\x91\x26\x87\xc0\x12\xcc\x75\x11\xa3\xa1\xc5" + "\xae\x20\xcf\xcb\x20\x25\x6b\x7a\x31\x93\x9d\x38\xb9\x57\x72\x46" + "\xd4\x84\x65\x87\xf1\xb5\xd3\xab\xfc\xc3\x4d\x40\x92\x94\x1e\xcd" + "\x1c\x87\xec\x3f\xcd\xbe\xd0\x95\x6b\x40\x02\xdd\x62\xeb\x0a\xda" + "\x4f\xbe\x8e\x32\x48\x8b\x6d\x83\xa0\x96\x62\x23\xec\x83\x91\x44" + "\xf9\x72\x01\xac\xa0\xe4\x72\x1d\x5a\x75\x05\x57\x90\xae\x7e\xb4" + "\x71\x39\x01\x05\xdc\xe9\xee\xcb\xf0\x61\x28\x91\x69\x8c\x31\x03" + "\x7a\x92\x15\xa1\x58\x67\x3d\x70\x82\xa6\x2c\xfe\x10\x56\x58\xd3" + "\x94\x67\xe1\xbe\xee\xc1\x64\x5c\x4b\xc8\x28\x3d\xc5\x66\x3a\xab" + "\x22\xc1\x7e\xa1\xbb\xf3\x19\x3b\xda\x46\x82\x45\xd4\x3c\x7c\xc6" + "\xce\x1f\x7f\x95\xa2\x17\xff\x88\xba\xd6\x4d\xdb\xd2\xea\xde\x39" + "\xd6\xa5\x18\x73\xbb\x64\x6e\x79\xe9\xdc\x3f\x92\x7f\xda\x1f\x49" + "\x33\x70\x65\x73\xa2\xd9\x06\xb8\x1b\x29\x29\x1a\xe0\xa3\xe6\x05" + "\x9a\xa8\xc2\x4e\x7a\x78\x1d\x22\x57\x21\xc8\xa3\x8d\x66\x3e\x23", + .b_public = + "\x73\x40\x8b\xce\xe8\x6a\x1c\x03\x50\x54\x42\x36\x22\xc6\x1d\xe8" + "\xe1\xef\x5c\x89\xa5\x55\xc1\xc4\x1c\xd7\x4f\xee\x5d\xba\x62\x60" + "\xfe\x93\x2f\xfd\x93\x2c\x8f\x70\xc6\x47\x17\x25\xb2\x95\xd7\x7d" + "\x41\x81\x4d\x52\x1c\xbe\x4d\x57\x3e\x26\x51\x28\x03\x8f\x67\xf5" + "\x22\x16\x1c\x67\xf7\x62\xcb\xfd\xa3\xee\x8d\xe0\xfa\x15\x9a\x53" + "\xbe\x7b\x9f\xc0\x12\x7a\xfc\x5e\x77\x2d\x60\x06\xba\x71\xc5\xca" + "\xd7\x26\xaf\x3b\xba\x6f\xd3\xc4\x82\x57\x19\x26\xb0\x16\x7b\xbd" + "\x83\xf2\x21\x03\x79\xff\x0a\x6f\xc5\x7b\x00\x15\xad\x5b\xf4\x42" + "\x1f\xcb\x7f\x3d\x34\x77\x3c\xc3\xe0\x38\xa5\x40\x51\xbe\x6f\xd9" + "\xc9\x77\x9c\xfc\x0d\xc1\x8e\xef\x0f\xaa\x5e\xa8\xbb\x16\x4a\x3e" + "\x26\x55\xae\xc1\xb6\x3e\xfd\x73\xf7\x59\xd2\xe5\x4b\x91\x8e\x28" + "\x77\x1e\x5a\xe2\xcd\xce\x92\x35\xbb\x1e\xbb\xcf\x79\x94\xdf\x31" + "\xde\x31\xa8\x75\xf6\xe0\xaa\x2e\xe9\x4f\x44\xc8\xba\xb9\xab\x80" + "\x29\xa1\xea\x58\x2e\x40\x96\xa0\x1a\xf5\x2c\x38\x47\x43\x5d\x26" + "\x2c\xd8\xad\xea\xd3\xad\xe8\x51\x49\xad\x45\x2b\x25\x7c\xde\xe4" + "\xaf\x03\x2a\x39\x26\x86\x66\x10\xbc\xa8\x71\xda\xe0\xe8\xf1\xdd" + "\x50\xff\x44\xb2\xd3\xc7\xff\x66\x63\xf6\x42\xe3\x97\x9d\x9e\xf4" + "\xa6\x89\xb9\xab\x12\x17\xf2\x85\x56\x9c\x6b\x24\x71\x83\x57\x7d" + "\x3c\x7b\x2b\x88\x92\x19\xd7\x1a\x00\xd5\x38\x94\x43\x60\x4d\xa7" + "\x12\x9e\x0d\xf6\x5c\x9a\xd3\xe2\x9e\xb1\x21\xe8\xe2\x9e\xe9\x1e" + "\x9d\xa5\x94\x95\xa6\x3d\x12\x15\xd8\x8b\xac\xe0\x8c\xde\xe6\x40" + "\x98\xaa\x5e\x55\x4f\x3d\x86\x87\x0d\xe3\xc6\x68\x15\xe6\xde\x17" + "\x78\x21\xc8\x6c\x06\xc7\x94\x56\xb4\xaf\xa2\x35\x0b\x0c\x97\xd7" + "\xa4\x12\xee\xf4\xd2\xef\x80\x28\xb3\xee\xe9\x15\x8b\x01\x32\x79", + .expected_a_public = + "\x1b\x6a\xba\xea\xa3\xcc\x50\x69\xa9\x41\x89\xaf\x04\xe1\x44\x22" + "\x97\x20\xd1\xf6\x1e\xcb\x64\x36\x6f\xee\x0b\x16\xc1\xd9\x91\xbe" + 
"\x57\xc8\xd9\xf2\xa1\x96\x91\xec\x41\xc7\x79\x00\x1a\x48\x25\x55" + "\xbe\xf3\x20\x8c\x38\xc6\x7b\xf2\x8b\x5a\xc3\xb5\x87\x0a\x86\x3d" + "\xb7\xd6\xce\xb0\x96\x2e\x5d\xc4\x00\x5e\x42\xe4\xe5\x50\x4f\xb8" + "\x6f\x18\xa4\xe1\xd3\x20\xfc\x3c\xf5\x0a\xff\x23\xa6\x5b\xb4\x17" + "\x3e\x7b\xdf\xb9\xb5\x3c\x1b\x76\x29\xcd\xb4\x46\x4f\x27\x8f\xd2" + "\xe8\x27\x66\xdb\xe8\xb3\xf5\xe1\xd0\x04\xcd\x89\xff\xba\x76\x67" + "\xe8\x4d\xcf\x86\x1c\x8a\xd1\xcf\x99\x27\xfb\xa9\x78\xcc\x94\xaf" + "\x3d\x04\xfd\x25\xc0\x47\xfa\x29\x80\x05\xf4\xde\xad\xdb\xab\x12" + "\xb0\x2b\x8e\xca\x02\x06\x6d\xad\x3e\x09\xb1\x22\xa3\xf5\x4c\x6d" + "\x69\x99\x58\x8b\xd8\x45\x2e\xe0\xc9\x3c\xf7\x92\xce\x21\x90\x6b" + "\x3b\x65\x9f\x64\x79\x8d\x67\x22\x1a\x37\xd3\xee\x51\xe2\xe7\x5a" + "\x93\x51\xaa\x3c\x4b\x04\x16\x32\xef\xe3\x66\xbe\x18\x94\x88\x64" + "\x79\xce\x06\x3f\xb8\xd6\xee\xdc\x13\x79\x6f\x20\x14\xc2\x6b\xce" + "\xc8\xda\x42\xa5\x93\x5b\xe4\x7f\x1a\xe6\xda\x0f\xb3\xc1\x5f\x30" + "\x50\x76\xe8\x37\x3d\xca\x77\x2c\xa8\xe4\x3b\xf9\x6f\xe0\x17\xed" + "\x0e\xef\xb7\x31\x14\xb5\xea\xd9\x39\x22\x89\xb6\x40\x57\xcc\x84" + "\xef\x73\xa7\xe9\x27\x21\x85\x89\xfa\xaf\x03\xda\x9c\x8b\xfd\x52" + "\x7d\xb0\xa4\xe4\xf9\xd8\x90\x55\xc4\x39\xd6\x9d\xaf\x3b\xce\xac" + "\xaa\x36\x14\x7a\x9b\x8b\x12\x43\xe1\xca\x61\xae\x46\x5b\xe7\xe5" + "\x88\x32\x80\xa0\x2d\x51\xbb\x2f\xea\xeb\x3c\x71\xb2\xae\xce\xca" + "\x61\xd2\x76\xe0\x45\x46\x78\x4e\x09\x2d\xc2\x54\xc2\xa9\xc7\xa8" + "\x55\x8e\x72\xa4\x8b\x8a\xc9\x01\xdb\xe9\x58\x11\xa1\xc4\xe7\x12", + .expected_ss = + "\x47\x8e\xb2\x19\x09\xf0\x46\x99\x6b\x41\x86\xf7\x34\xad\xbf\x2a" + "\x18\x1b\x7d\xec\xa9\xb2\x47\x2f\x40\xfb\x9a\x64\x30\x44\xf3\x4c" + "\x01\x67\xad\x57\x5a\xbc\xd4\xc8\xef\x7e\x8a\x14\x74\x1d\x6d\x8c" + "\x7b\xce\xc5\x57\x5f\x95\xe8\x72\xba\xdf\xa3\xcd\x00\xbe\x09\x4c" + "\x06\x72\xe7\x17\xb0\xe5\xe5\xb7\x20\xa5\xcb\xd9\x68\x99\xad\x3f" + "\xde\xf3\xde\x1d\x1c\x00\x74\xd2\xd1\x57\x55\x5d\xce\x76\x0c\xc4" + "\x7a\xc4\x65\x7c\x19\x17\x0a\x09\x66\x7d\x3a\xab\xf7\x61\x3a\xe3" + "\x5b\xac\xcf\x69\xb0\x8b\xee\x5d\x28\x36\xbb\x3f\x74\xce\x6e\x38" + "\x1e\x39\xab\x26\xca\x89\xdc\x58\x59\xcb\x95\xe4\xbc\xd6\x19\x48" + "\xd0\x55\x68\x7b\xb4\x27\x95\x3c\xd9\x58\x10\x4f\x8f\x55\x1c\x3f" + "\x04\xce\x89\x1f\x82\x28\xe9\x48\x17\x47\x8f\xee\xb7\x8f\xeb\xb1" + "\x29\xa8\x23\x18\x73\x33\x9f\x83\x08\xca\xcd\x54\x6e\xca\xec\x78" + "\x7b\x16\x83\x3f\xdb\x0a\xef\xfd\x87\x94\x19\x08\x6e\x6e\x22\x57" + "\xd7\xd2\x79\xf9\xf6\xeb\xe0\x6c\x93\x9d\x95\xfa\x41\x7a\xa9\xd6" + "\x2a\xa3\x26\x9b\x24\x1b\x8b\xa0\xed\x04\xb2\xe4\x6c\x4e\xc4\x3f" + "\x61\xe5\xe0\x4d\x09\x28\xaf\x58\x35\x25\x0b\xd5\x38\x18\x69\x51" + "\x18\x51\x73\x7b\x28\x19\x9f\xe4\x69\xfc\x2c\x25\x08\x99\x8f\x62" + "\x65\x62\xa5\x28\xf1\xf4\xfb\x02\x29\x27\xb0\x5e\xbb\x4f\xf9\x1a" + "\xa7\xc4\x38\x63\x5b\x01\xfe\x00\x66\xe3\x47\x77\x21\x85\x17\xd5" + "\x34\x19\xd3\x87\xab\x44\x62\x08\x59\xb2\x6b\x1f\x21\x0c\x23\x84" + "\xf7\xba\x92\x67\xf9\x16\x85\x6a\xe0\xeb\xe7\x4f\x06\x80\x81\x81" + "\x28\x9c\xe8\x2e\x71\x97\x48\xe0\xd1\xbc\xce\xe9\x42\x2c\x89\xdf" + "\x0b\xa9\xa1\x07\x84\x33\x78\x7f\x49\x2f\x1c\x55\xc3\x7f\xc3\x37" + "\x40\xdf\x13\xf4\xa0\x21\x79\x6e\x3a\xe3\xb8\x23\x9e\x8a\x6e\x9c", + .secret_size = 400, + .b_public_size = 384, + .expected_a_public_size = 384, + .expected_ss_size = 384, + }, + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x00" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#else + "\x00\x01" /* type */ + "\x00\x10" /* len */ + 
"\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#endif + .b_secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x90\x01" /* len */ + "\x80\x01\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x01\x90" /* len */ + "\x00\x00\x01\x80" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x6b\xb4\x97\x23\xfa\xc8\x5e\xa9\x7b\x63\xe7\x3e\x0e\x99\xc3\xb9" + "\xda\xb7\x48\x0d\xc3\xb1\xbf\x4f\x17\xc7\xa9\x51\xf6\x64\xff\xc4" + "\x31\x58\x87\x25\x83\x2c\x00\xf0\x41\x29\xf7\xee\xf9\xe6\x36\x76" + "\xd6\x3a\x24\xbe\xa7\x07\x0b\x93\xc7\x9f\x6c\x75\x0a\x26\x75\x76" + "\xe3\x0c\x42\xe0\x00\x04\x69\xd9\xec\x0b\x59\x54\x28\x8f\xd7\x9a" + "\x63\xf4\x5b\xdf\x85\x65\xc4\xe1\x95\x27\x4a\x42\xad\x36\x47\xa9" + "\x0a\xf8\x14\x1c\xf3\x94\x3b\x7e\x47\x99\x35\xa8\x18\xec\x70\x10" + "\xdf\xcb\xd2\x78\x88\xc1\x2d\x59\x93\xc1\xa4\x6d\xd7\x1d\xb9\xd5" + "\xf8\x30\x06\x7f\x98\x90\x0c\x74\x5e\x89\x2f\x64\x5a\xad\x5f\x53" + "\xb2\xa3\xa8\x83\xbf\xfc\x37\xef\xb8\x36\x0a\x5c\x62\x81\x64\x74" + "\x16\x2f\x45\x39\x2a\x91\x26\x87\xc0\x12\xcc\x75\x11\xa3\xa1\xc5" + "\xae\x20\xcf\xcb\x20\x25\x6b\x7a\x31\x93\x9d\x38\xb9\x57\x72\x46" + "\xd4\x84\x65\x87\xf1\xb5\xd3\xab\xfc\xc3\x4d\x40\x92\x94\x1e\xcd" + "\x1c\x87\xec\x3f\xcd\xbe\xd0\x95\x6b\x40\x02\xdd\x62\xeb\x0a\xda" + "\x4f\xbe\x8e\x32\x48\x8b\x6d\x83\xa0\x96\x62\x23\xec\x83\x91\x44" + "\xf9\x72\x01\xac\xa0\xe4\x72\x1d\x5a\x75\x05\x57\x90\xae\x7e\xb4" + "\x71\x39\x01\x05\xdc\xe9\xee\xcb\xf0\x61\x28\x91\x69\x8c\x31\x03" + "\x7a\x92\x15\xa1\x58\x67\x3d\x70\x82\xa6\x2c\xfe\x10\x56\x58\xd3" + "\x94\x67\xe1\xbe\xee\xc1\x64\x5c\x4b\xc8\x28\x3d\xc5\x66\x3a\xab" + "\x22\xc1\x7e\xa1\xbb\xf3\x19\x3b\xda\x46\x82\x45\xd4\x3c\x7c\xc6" + "\xce\x1f\x7f\x95\xa2\x17\xff\x88\xba\xd6\x4d\xdb\xd2\xea\xde\x39" + "\xd6\xa5\x18\x73\xbb\x64\x6e\x79\xe9\xdc\x3f\x92\x7f\xda\x1f\x49" + "\x33\x70\x65\x73\xa2\xd9\x06\xb8\x1b\x29\x29\x1a\xe0\xa3\xe6\x05" + "\x9a\xa8\xc2\x4e\x7a\x78\x1d\x22\x57\x21\xc8\xa3\x8d\x66\x3e\x23", + .b_public = + "\x1b\x6a\xba\xea\xa3\xcc\x50\x69\xa9\x41\x89\xaf\x04\xe1\x44\x22" + "\x97\x20\xd1\xf6\x1e\xcb\x64\x36\x6f\xee\x0b\x16\xc1\xd9\x91\xbe" + "\x57\xc8\xd9\xf2\xa1\x96\x91\xec\x41\xc7\x79\x00\x1a\x48\x25\x55" + "\xbe\xf3\x20\x8c\x38\xc6\x7b\xf2\x8b\x5a\xc3\xb5\x87\x0a\x86\x3d" + "\xb7\xd6\xce\xb0\x96\x2e\x5d\xc4\x00\x5e\x42\xe4\xe5\x50\x4f\xb8" + "\x6f\x18\xa4\xe1\xd3\x20\xfc\x3c\xf5\x0a\xff\x23\xa6\x5b\xb4\x17" + "\x3e\x7b\xdf\xb9\xb5\x3c\x1b\x76\x29\xcd\xb4\x46\x4f\x27\x8f\xd2" + "\xe8\x27\x66\xdb\xe8\xb3\xf5\xe1\xd0\x04\xcd\x89\xff\xba\x76\x67" + "\xe8\x4d\xcf\x86\x1c\x8a\xd1\xcf\x99\x27\xfb\xa9\x78\xcc\x94\xaf" + "\x3d\x04\xfd\x25\xc0\x47\xfa\x29\x80\x05\xf4\xde\xad\xdb\xab\x12" + "\xb0\x2b\x8e\xca\x02\x06\x6d\xad\x3e\x09\xb1\x22\xa3\xf5\x4c\x6d" + "\x69\x99\x58\x8b\xd8\x45\x2e\xe0\xc9\x3c\xf7\x92\xce\x21\x90\x6b" + "\x3b\x65\x9f\x64\x79\x8d\x67\x22\x1a\x37\xd3\xee\x51\xe2\xe7\x5a" + "\x93\x51\xaa\x3c\x4b\x04\x16\x32\xef\xe3\x66\xbe\x18\x94\x88\x64" + "\x79\xce\x06\x3f\xb8\xd6\xee\xdc\x13\x79\x6f\x20\x14\xc2\x6b\xce" + "\xc8\xda\x42\xa5\x93\x5b\xe4\x7f\x1a\xe6\xda\x0f\xb3\xc1\x5f\x30" + "\x50\x76\xe8\x37\x3d\xca\x77\x2c\xa8\xe4\x3b\xf9\x6f\xe0\x17\xed" + "\x0e\xef\xb7\x31\x14\xb5\xea\xd9\x39\x22\x89\xb6\x40\x57\xcc\x84" + "\xef\x73\xa7\xe9\x27\x21\x85\x89\xfa\xaf\x03\xda\x9c\x8b\xfd\x52" + "\x7d\xb0\xa4\xe4\xf9\xd8\x90\x55\xc4\x39\xd6\x9d\xaf\x3b\xce\xac" + 
"\xaa\x36\x14\x7a\x9b\x8b\x12\x43\xe1\xca\x61\xae\x46\x5b\xe7\xe5" + "\x88\x32\x80\xa0\x2d\x51\xbb\x2f\xea\xeb\x3c\x71\xb2\xae\xce\xca" + "\x61\xd2\x76\xe0\x45\x46\x78\x4e\x09\x2d\xc2\x54\xc2\xa9\xc7\xa8" + "\x55\x8e\x72\xa4\x8b\x8a\xc9\x01\xdb\xe9\x58\x11\xa1\xc4\xe7\x12", + .secret_size = 16, + .b_secret_size = 400, + .b_public_size = 384, + .expected_a_public_size = 384, + .expected_ss_size = 384, + .genkey = true, + }, +}; -}, -/* wycheproof - private key == -1 (mod order) */ -{ - .secret = (u8[32]){ 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8, - 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 }, - .b_public = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, - 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, - 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, - 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, - .expected_ss = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, - 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, - 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, - 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, +static const struct kpp_testvec ffdhe4096_dh_tv_template[] __maybe_unused = { + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x02" /* len */ + "\x00\x02\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x02\x10" /* len */ + "\x00\x00\x02\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x1a\x48\xf3\x6c\x61\x03\x42\x43\xd7\x42\x3b\xfa\xdb\x55\x6f\xa2" + "\xe1\x79\x52\x0b\x47\xc5\x03\x60\x2f\x26\xb9\x1a\x14\x15\x1a\xd9" + "\xe0\xbb\xa7\x82\x63\x41\xec\x26\x55\x00\xab\xe5\x21\x9d\x31\x14" + "\x0e\xe2\xc2\xb2\xb8\x37\xe6\xc3\x5a\xab\xae\x25\xdb\x71\x1e\xed" + "\xe8\x75\x9a\x04\xa7\x92\x2a\x99\x7e\xc0\x5b\x64\x75\x7f\xe5\xb5" + "\xdb\x6c\x95\x4f\xe9\xdc\x39\x76\x79\xb0\xf7\x00\x30\x8e\x86\xe7" + "\x36\xd1\xd2\x0c\x68\x7b\x94\xe9\x91\x85\x08\x86\xbc\x64\x87\xd2" + "\xf5\x5b\xaf\x03\xf6\x5f\x28\x25\xf1\xa3\x20\x5c\x1b\xb5\x26\x45" + "\x9a\x47\xab\xd6\xad\x49\xab\x92\x8e\x62\x6f\x48\x31\xea\xf6\x76" + "\xff\xa2\xb6\x28\x78\xef\x59\xc3\x71\x5d\xa8\xd9\x70\x89\xcc\xe2" + "\x63\x58\x5e\x3a\xa2\xa2\x88\xbf\x77\x20\x84\x33\x65\x64\x4e\x73" + "\xe5\x08\xd5\x89\x23\xd6\x07\xac\x29\x65\x2e\x02\xa8\x35\x96\x48" + "\xe7\x5d\x43\x6a\x42\xcc\xda\x98\xc4\x75\x90\x2e\xf6\xc4\xbf\xd4" + "\xbc\x31\x14\x0d\x54\x30\x11\xb2\xc9\xcf\xbb\xba\xbc\xc6\xf2\xcf" + "\xfe\x4a\x9d\xf3\xec\x78\x5d\x5d\xb4\x99\xd0\x67\x0f\x5a\x21\x1c" + "\x7b\x95\x2b\xcf\x49\x44\x94\x05\x1a\x21\x81\x25\x7f\xe3\x8a\x2a" + "\xdd\x88\xac\x44\x94\x23\x20\x3b\x75\xf6\x2a\x8a\x45\xf8\xb5\x1f" + "\xb9\x8b\xeb\xab\x9b\x38\x23\x26\xf1\x0f\x34\x47\x4f\x7f\xe1\x9e" + "\x84\x84\x78\xe5\xe3\x49\xeb\xcc\x2f\x02\x85\xa4\x18\x91\xde\x1a" + "\x60\x54\x33\x81\xd5\xae\xdb\x23\x9c\x4d\xa4\xdb\x22\x5b\xdf\xf4" + "\x8e\x05\x2b\x60\xba\xe8\x75\xfc\x34\x99\xcf\x35\xe1\x06\xba\xdc" + "\x79\x2a\x5e\xec\x1c\xbe\x79\x33\x63\x1c\xe7\x5f\x1e\x30\xd6\x1b" + "\xdb\x11\xb8\xea\x63\xff\xfe\x1a\x3c\x24\xf4\x78\x9c\xcc\x5d\x9a" + "\xc9\x2d\xc4\x9a\xd4\xa7\x65\x84\x98\xdb\x66\x76\xf0\x34\x31\x9f" + "\xce\xb5\xfb\x28\x07\xde\x1e\x0d\x9b\x01\x64\xeb\x2a\x37\x2f\x20" + "\xa5\x95\x72\x2b\x54\x51\x59\x91\xea\x50\x54\x0f\x2e\xb0\x1d\xf6" + "\xb9\x46\x43\xf9\xd0\x13\x21\x20\x47\x61\x1a\x1c\x30\xc6\x9e\x75" + 
"\x22\xe4\xf2\xb1\xab\x01\xdc\x5b\x3c\x1e\xa2\x6d\xc0\xb9\x9a\x2a" + "\x84\x61\xea\x85\x63\xa0\x77\xd0\xeb\x20\x68\xd5\x95\x6a\x1b\x8f" + "\x1f\x9a\xba\x44\x49\x8c\x77\xa6\xd9\xa0\x14\xf8\x7d\x9b\x4e\xfa" + "\xdc\x4f\x1c\x4d\x60\x50\x26\x7f\xd6\xc1\x91\x2b\xa6\x37\x5d\x94" + "\x69\xb2\x47\x59\xd6\xc3\x59\xbb\xd6\x9b\x71\x52\x85\x7a\xcb\x2d", + .b_public = + "\x24\x38\x02\x02\x2f\xeb\x54\xdd\x73\x21\x91\x4a\xd8\xa4\x0a\xbf" + "\xf4\xf5\x9a\x45\xb5\xcd\x42\xa3\x57\xcc\x65\x4a\x23\x2e\xee\x59" + "\xba\x6f\x14\x89\xae\x2e\x14\x0a\x72\x77\x23\x7f\x6c\x2e\xba\x52" + "\x3f\x71\xbf\xe4\x60\x03\x16\xaa\x61\xf5\x80\x1d\x8a\x45\x9e\x53" + "\x7b\x07\xd9\x7e\xfe\xaf\xcb\xda\xff\x20\x71\xba\x89\x39\x75\xc3" + "\xb3\x65\x0c\xb1\xa7\xfa\x4a\xe7\xe0\x85\xc5\x4e\x91\x47\x41\xf4" + "\xdd\xcd\xc5\x3d\x17\x12\xed\xee\xc0\x31\xb1\xaf\xc1\xd5\x3c\x07" + "\xa1\x5a\xc4\x05\x45\xe3\x10\x0c\xc3\x14\xae\x65\xca\x40\xae\x31" + "\x5c\x13\x0d\x32\x85\xa7\x6e\xf4\x5e\x29\x3d\x4e\xd3\xd7\x49\x58" + "\xe1\x73\xbb\x0a\x7b\xd6\x13\xea\x49\xd7\x20\x3d\x31\xaa\x77\xab" + "\x21\x74\xe9\x2f\xe9\x5e\xbe\x2f\xb4\xa2\x79\xf2\xbc\xcc\x51\x94" + "\xd2\x1d\xb2\xe6\xc5\x39\x66\xd7\xe5\x46\x75\x53\x76\xed\x49\xea" + "\x3b\xdd\x01\x27\xdb\x83\xa5\x9f\xd2\xee\xc8\xde\x9e\xde\xd2\xe7" + "\x99\xad\x9c\xe0\x71\x66\x29\xd8\x0d\xfe\xdc\xd1\xbc\xc7\x9a\xbe" + "\x8b\x26\x46\x57\xb6\x79\xfa\xad\x8b\x45\x2e\xb5\xe5\x89\x34\x01" + "\x93\x00\x9d\xe9\x58\x74\x8b\xda\x07\x92\xb5\x01\x4a\xe1\x44\x36" + "\xc7\x6c\xde\xc8\x7a\x17\xd0\xde\xee\x68\x92\xb5\xde\x21\x2b\x1c" + "\xbc\x65\x30\x1e\xae\x15\x3d\x9a\xaf\x20\xa3\xc4\x21\x70\xfb\x2f" + "\x36\x72\x31\xc0\xe8\x85\xdf\xc5\x50\x4c\x90\x10\x32\xa4\xc7\xee" + "\x59\x5a\x21\xf4\xf1\x33\xcf\xbe\xac\x67\xb1\x40\x7c\x0b\x3f\x64" + "\xe5\xd2\x2d\xb7\x7d\x0f\xce\xf7\x9b\x05\xee\x37\x61\xd2\x61\x9e" + "\x1a\x80\x2e\x79\xe6\x1b\x25\xb3\x61\x3d\x53\xe7\xe5\x97\x9a\xc2" + "\x39\xb1\xe3\x91\xc6\xee\x96\x2e\xa9\xb4\xb8\xad\xd8\x04\x3e\x11" + "\x31\x67\xb8\x6a\xcb\x6e\x1a\x4c\x7f\x74\xc7\x1f\x09\xd1\xd0\x6b" + "\x17\xde\xea\xe8\x0b\xe6\x6a\xee\x2f\xe3\x5b\x9c\x59\x5d\x00\x57" + "\xbf\x24\x25\xba\x22\x34\xb9\xc5\x3c\xc4\x57\x26\xd0\x6d\x89\xee" + "\x67\x79\x3c\x70\xf9\xc3\xb4\x30\xf0\x2e\xca\xfa\x74\x00\xd1\x00" + "\x6d\x03\x97\xd5\x08\x3f\x0b\x8e\xb8\x1d\xa3\x91\x7f\xa9\x3a\xf0" + "\x37\x57\x46\x87\x82\xa3\xb5\x8f\x51\xaa\xc7\x7b\xfe\x86\x26\xb9" + "\xfa\xe6\x1e\xee\x92\x9d\x3a\xed\x5b\x5e\x3f\xe5\xca\x5e\x13\x01" + "\xdd\x4c\x8d\x85\xf0\x60\x61\xb7\x60\x24\x83\x9f\xbe\x72\x21\x81" + "\x55\x7e\x7e\x6d\xf3\x28\xc8\x77\x5a\xae\x5a\x32\x86\xd5\x61\xad", + .expected_a_public = + "\x1f\xff\xd6\xc4\x59\xf3\x4a\x9e\x81\x74\x4d\x27\xa7\xc6\x6b\x35" + "\xd8\xf5\xb3\x24\x97\x82\xe7\x2e\xf3\x21\x91\x23\x2f\x3d\x57\x7f" + "\x15\x8c\x84\x71\xe7\x25\x35\xe8\x07\x14\x06\x4c\x83\xdc\x55\x4a" + "\xf8\x45\xc5\xe9\xfa\x6e\xae\x6e\xcf\x4d\x11\x91\x26\x16\x6f\x86" + "\x89\x78\xaa\xb4\x25\x54\xb2\x74\x07\xe5\x26\x26\x0c\xad\xa4\x57" + "\x59\x61\x66\x71\x43\x22\xff\x49\x51\xa4\x76\x0e\x55\x7b\x60\x45" + "\x4f\xaf\xbd\x9c\xec\x64\x3f\x80\x0b\x0c\x31\x41\xf0\xfe\x2c\xb7" + "\x0a\xbe\xa5\x71\x08\x0d\x8d\x1e\x8a\x77\x9a\xd2\x90\x31\x96\xd0" + "\x3b\x31\xdc\xc6\x18\x59\x43\xa1\x19\x5a\x84\x68\x29\xad\x5e\x58" + "\xa2\x50\x3e\x83\xf5\x7a\xbd\x88\x17\x60\x89\x98\x9c\x19\x89\x27" + "\x89\xfc\x33\x87\x42\xd5\xde\x19\x14\xf2\x95\x82\x10\x87\xad\x82" + "\xdd\x6b\x51\x2d\x8d\x0e\x81\x4b\xde\xb3\x35\x6c\x0f\x4b\x56\x45" + "\x48\x87\xe9\x5a\xf9\x70\x10\x30\x8e\xa1\xbb\xa4\x70\xbf\xa0\xab" + 
"\x10\x31\x3c\x2c\xdc\xc4\xed\xe3\x51\xdc\xee\xd2\xa5\x5c\x4e\x6e" + "\xf6\xed\x60\x5a\xeb\xf3\x02\x19\x2a\x95\xe9\x46\xff\x37\x1b\xf0" + "\x1d\x10\x4a\x8f\x4f\x3a\x6e\xf5\xfc\x02\x6d\x09\x7d\xea\x69\x7b" + "\x13\xb0\xb6\x80\x5c\x15\x20\xa8\x4d\x15\x56\x11\x72\x49\xdb\x48" + "\x54\x40\x66\xd5\xcd\x17\x3a\x26\x95\xf6\xd7\xf2\x59\xa3\xda\xbb" + "\x26\xd0\xe5\x46\xbf\xee\x0e\x7d\xf1\xe0\x11\x02\x4d\xd3\xdc\xe2" + "\x3f\xc2\x51\x7e\xc7\x90\x33\x3c\x1c\xa0\x4c\x69\xcc\x1e\xc7\xac" + "\x17\xe0\xe5\xf4\x8c\x05\x64\x34\xfe\x84\x70\xd7\x6b\xed\xab\xf5" + "\x88\x9d\x3e\x4c\x5a\x9e\xd4\x74\xfd\xdd\x91\xd5\xd4\xcb\xbf\xf8" + "\xb7\x56\xb5\xe9\x22\xa6\x6d\x7a\x44\x05\x41\xbf\xdb\x61\x28\xc6" + "\x99\x49\x87\x3d\x28\x77\xf8\x83\x23\x7e\xa9\xa7\xee\x20\xdb\x6d" + "\x21\x50\xb7\xc9\x52\x57\x53\xa3\xcf\xdf\xd0\xf9\xb9\x62\x96\x89" + "\xf5\x5c\xa9\x8a\x11\x95\x01\x25\xc9\x81\x15\x76\xae\xf0\xc7\xc5" + "\x50\xae\x6f\xb5\xd2\x8a\x8e\x9a\xd4\x30\x55\xc6\xe9\x2c\x81\x6e" + "\x95\xf6\x45\x89\x55\x28\x34\x7b\xe5\x72\x9a\x2a\xe2\x98\x09\x35" + "\xe0\xe9\x75\x94\xe9\x34\x95\xb9\x13\x6e\xd5\xa1\x62\x5a\x1c\x94" + "\x28\xed\x84\x46\x76\x6d\x10\x37\x71\xa3\x31\x46\x64\xe4\x59\x44" + "\x17\x70\x1c\x23\xc9\x7e\xf6\xab\x8a\x24\xae\x25\xe2\xb2\x5f\x33" + "\xe4\xd7\xd3\x34\x2a\x49\x22\x16\x15\x9b\x90\x40\xda\x99\xd5\xaf", + .expected_ss = + "\xe2\xce\x0e\x4b\x64\xf3\x84\x62\x38\xfd\xe3\x6f\x69\x40\x22\xb0" + "\x73\x27\x03\x12\x82\xa4\x6e\x03\x57\xec\x3d\xa0\xc1\x4f\x4b\x09" + "\xa1\xd4\xe0\x1a\x5d\x91\x2e\x08\xad\x57\xfa\xcc\x55\x90\x5f\xa0" + "\x52\x27\x62\x8d\xe5\x2d\xa1\x5f\xf0\x30\x43\x77\x4e\x3f\x02\x58" + "\xcb\xa0\x51\xae\x1d\x24\xf9\x0a\xd1\x36\x0b\x95\x0f\x07\xd9\xf7" + "\xe2\x36\x14\x2f\xf0\x11\xc2\xc9\xaf\x66\x4e\x0d\xb4\x60\x01\x4e" + "\xa8\x49\xc6\xec\x5f\xb2\xbc\x05\x48\x91\x4e\xe1\xc3\x99\x9f\xeb" + "\x4a\xc1\xde\x05\x9a\x65\x39\x7d\x2f\x89\x85\xb2\xcf\xec\x25\x27" + "\x5f\x1c\x11\x63\xcf\x7b\x86\x98\x39\xae\xc2\x16\x8f\x79\xd1\x20" + "\xd0\xb4\xa0\xba\x44\xd8\xf5\x3a\x0a\x08\x4c\xd1\xb9\xdd\x0a\x5b" + "\x9e\x62\xf3\x52\x0c\x84\x12\x43\x9b\xd7\xdf\x86\x71\x03\xdd\x04" + "\x98\x55\x0c\x7b\xe2\xe8\x03\x17\x25\x84\xd9\xbd\xe1\xce\x64\xbe" + "\xca\x55\xd4\x5b\xef\x61\x5b\x68\x4b\x80\x37\x40\xae\x28\x87\x81" + "\x55\x34\x96\x50\x21\x47\x49\xc0\xda\x26\x46\xb8\xe8\xcc\x5a\x27" + "\x9c\x9d\x0a\x3d\xcc\x4c\x63\x27\x81\x82\x2e\xf4\xa8\x91\x37\x3e" + "\xa7\x34\x6a\x0f\x60\x44\xdd\x2e\xdc\xf9\x19\xf2\x2e\x81\x05\x51" + "\x16\xbc\xc0\x85\xa5\xd5\x08\x09\x1f\xcd\xed\xa4\xc5\xdb\x16\x43" + "\xb5\x7a\x71\x66\x19\x2e\xef\x13\xbc\x40\x39\x0a\x00\x45\x7e\x61" + "\xe9\x68\x60\x83\x00\x70\xd1\x71\xd3\xa2\x61\x3e\x00\x46\x93\x0d" + "\xbf\xe6\xa2\x07\xe6\x40\x1a\xf4\x57\xc6\x67\x39\xd8\xd7\x6b\xc5" + "\xa5\xd8\x38\x78\x12\xb4\x97\x12\xbe\x97\x13\xef\xe4\x74\x0c\xe0" + "\x75\x89\x64\xf4\xe8\x85\xda\x84\x7b\x1d\xfe\xdd\x21\xba\xda\x01" + "\x52\xdc\x59\xe5\x47\x50\x7e\x15\x20\xd0\x43\x37\x6e\x48\x39\x00" + "\xee\xd9\x54\x6d\x00\x65\xc9\x4b\x85\xa2\x8a\x40\x55\xd0\x63\x0c" + "\xb5\x7a\x0d\x37\x67\x27\x73\x18\x7f\x5a\xf5\x0e\x22\xb9\xb0\x3f" + "\xda\xf1\xec\x7c\x24\x01\x49\xa9\x09\x0e\x0f\xc4\xa9\xef\xc8\x2b" + "\x13\xd1\x0a\x6f\xf8\x92\x4b\x1d\xdd\x6c\x9c\x35\xde\x75\x46\x32" + "\xe6\xfb\xda\x58\xba\x81\x08\xca\xa9\xb6\x69\x71\x96\x2a\x1f\x2e" + "\x25\xe0\x37\xfe\xee\x4d\x27\xaa\x04\xda\x95\xbb\x93\xcf\x8f\xa2" + "\x1d\x67\x35\xe3\x51\x8f\x87\x3b\xa9\x62\x05\xee\x44\xb7\x2e\xd0" + "\x07\x63\x32\xf5\xcd\x64\x18\x20\xcf\x22\x42\x28\x22\x1a\xa8\xbb" + "\x74\x8a\x6f\x2a\xea\x8a\x48\x0a\xad\xd7\xed\xba\xa3\x89\x37\x01", + .secret_size = 
528, + .b_public_size = 512, + .expected_a_public_size = 512, + .expected_ss_size = 512, + }, + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x00" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#else + "\x00\x01" /* type */ + "\x00\x10" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#endif + .b_secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x02" /* len */ + "\x00\x02\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x02\x10" /* len */ + "\x00\x00\x02\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x1a\x48\xf3\x6c\x61\x03\x42\x43\xd7\x42\x3b\xfa\xdb\x55\x6f\xa2" + "\xe1\x79\x52\x0b\x47\xc5\x03\x60\x2f\x26\xb9\x1a\x14\x15\x1a\xd9" + "\xe0\xbb\xa7\x82\x63\x41\xec\x26\x55\x00\xab\xe5\x21\x9d\x31\x14" + "\x0e\xe2\xc2\xb2\xb8\x37\xe6\xc3\x5a\xab\xae\x25\xdb\x71\x1e\xed" + "\xe8\x75\x9a\x04\xa7\x92\x2a\x99\x7e\xc0\x5b\x64\x75\x7f\xe5\xb5" + "\xdb\x6c\x95\x4f\xe9\xdc\x39\x76\x79\xb0\xf7\x00\x30\x8e\x86\xe7" + "\x36\xd1\xd2\x0c\x68\x7b\x94\xe9\x91\x85\x08\x86\xbc\x64\x87\xd2" + "\xf5\x5b\xaf\x03\xf6\x5f\x28\x25\xf1\xa3\x20\x5c\x1b\xb5\x26\x45" + "\x9a\x47\xab\xd6\xad\x49\xab\x92\x8e\x62\x6f\x48\x31\xea\xf6\x76" + "\xff\xa2\xb6\x28\x78\xef\x59\xc3\x71\x5d\xa8\xd9\x70\x89\xcc\xe2" + "\x63\x58\x5e\x3a\xa2\xa2\x88\xbf\x77\x20\x84\x33\x65\x64\x4e\x73" + "\xe5\x08\xd5\x89\x23\xd6\x07\xac\x29\x65\x2e\x02\xa8\x35\x96\x48" + "\xe7\x5d\x43\x6a\x42\xcc\xda\x98\xc4\x75\x90\x2e\xf6\xc4\xbf\xd4" + "\xbc\x31\x14\x0d\x54\x30\x11\xb2\xc9\xcf\xbb\xba\xbc\xc6\xf2\xcf" + "\xfe\x4a\x9d\xf3\xec\x78\x5d\x5d\xb4\x99\xd0\x67\x0f\x5a\x21\x1c" + "\x7b\x95\x2b\xcf\x49\x44\x94\x05\x1a\x21\x81\x25\x7f\xe3\x8a\x2a" + "\xdd\x88\xac\x44\x94\x23\x20\x3b\x75\xf6\x2a\x8a\x45\xf8\xb5\x1f" + "\xb9\x8b\xeb\xab\x9b\x38\x23\x26\xf1\x0f\x34\x47\x4f\x7f\xe1\x9e" + "\x84\x84\x78\xe5\xe3\x49\xeb\xcc\x2f\x02\x85\xa4\x18\x91\xde\x1a" + "\x60\x54\x33\x81\xd5\xae\xdb\x23\x9c\x4d\xa4\xdb\x22\x5b\xdf\xf4" + "\x8e\x05\x2b\x60\xba\xe8\x75\xfc\x34\x99\xcf\x35\xe1\x06\xba\xdc" + "\x79\x2a\x5e\xec\x1c\xbe\x79\x33\x63\x1c\xe7\x5f\x1e\x30\xd6\x1b" + "\xdb\x11\xb8\xea\x63\xff\xfe\x1a\x3c\x24\xf4\x78\x9c\xcc\x5d\x9a" + "\xc9\x2d\xc4\x9a\xd4\xa7\x65\x84\x98\xdb\x66\x76\xf0\x34\x31\x9f" + "\xce\xb5\xfb\x28\x07\xde\x1e\x0d\x9b\x01\x64\xeb\x2a\x37\x2f\x20" + "\xa5\x95\x72\x2b\x54\x51\x59\x91\xea\x50\x54\x0f\x2e\xb0\x1d\xf6" + "\xb9\x46\x43\xf9\xd0\x13\x21\x20\x47\x61\x1a\x1c\x30\xc6\x9e\x75" + "\x22\xe4\xf2\xb1\xab\x01\xdc\x5b\x3c\x1e\xa2\x6d\xc0\xb9\x9a\x2a" + "\x84\x61\xea\x85\x63\xa0\x77\xd0\xeb\x20\x68\xd5\x95\x6a\x1b\x8f" + "\x1f\x9a\xba\x44\x49\x8c\x77\xa6\xd9\xa0\x14\xf8\x7d\x9b\x4e\xfa" + "\xdc\x4f\x1c\x4d\x60\x50\x26\x7f\xd6\xc1\x91\x2b\xa6\x37\x5d\x94" + "\x69\xb2\x47\x59\xd6\xc3\x59\xbb\xd6\x9b\x71\x52\x85\x7a\xcb\x2d", + .b_public = + "\x1f\xff\xd6\xc4\x59\xf3\x4a\x9e\x81\x74\x4d\x27\xa7\xc6\x6b\x35" + "\xd8\xf5\xb3\x24\x97\x82\xe7\x2e\xf3\x21\x91\x23\x2f\x3d\x57\x7f" + "\x15\x8c\x84\x71\xe7\x25\x35\xe8\x07\x14\x06\x4c\x83\xdc\x55\x4a" + "\xf8\x45\xc5\xe9\xfa\x6e\xae\x6e\xcf\x4d\x11\x91\x26\x16\x6f\x86" + "\x89\x78\xaa\xb4\x25\x54\xb2\x74\x07\xe5\x26\x26\x0c\xad\xa4\x57" + "\x59\x61\x66\x71\x43\x22\xff\x49\x51\xa4\x76\x0e\x55\x7b\x60\x45" + "\x4f\xaf\xbd\x9c\xec\x64\x3f\x80\x0b\x0c\x31\x41\xf0\xfe\x2c\xb7" + 
"\x0a\xbe\xa5\x71\x08\x0d\x8d\x1e\x8a\x77\x9a\xd2\x90\x31\x96\xd0" + "\x3b\x31\xdc\xc6\x18\x59\x43\xa1\x19\x5a\x84\x68\x29\xad\x5e\x58" + "\xa2\x50\x3e\x83\xf5\x7a\xbd\x88\x17\x60\x89\x98\x9c\x19\x89\x27" + "\x89\xfc\x33\x87\x42\xd5\xde\x19\x14\xf2\x95\x82\x10\x87\xad\x82" + "\xdd\x6b\x51\x2d\x8d\x0e\x81\x4b\xde\xb3\x35\x6c\x0f\x4b\x56\x45" + "\x48\x87\xe9\x5a\xf9\x70\x10\x30\x8e\xa1\xbb\xa4\x70\xbf\xa0\xab" + "\x10\x31\x3c\x2c\xdc\xc4\xed\xe3\x51\xdc\xee\xd2\xa5\x5c\x4e\x6e" + "\xf6\xed\x60\x5a\xeb\xf3\x02\x19\x2a\x95\xe9\x46\xff\x37\x1b\xf0" + "\x1d\x10\x4a\x8f\x4f\x3a\x6e\xf5\xfc\x02\x6d\x09\x7d\xea\x69\x7b" + "\x13\xb0\xb6\x80\x5c\x15\x20\xa8\x4d\x15\x56\x11\x72\x49\xdb\x48" + "\x54\x40\x66\xd5\xcd\x17\x3a\x26\x95\xf6\xd7\xf2\x59\xa3\xda\xbb" + "\x26\xd0\xe5\x46\xbf\xee\x0e\x7d\xf1\xe0\x11\x02\x4d\xd3\xdc\xe2" + "\x3f\xc2\x51\x7e\xc7\x90\x33\x3c\x1c\xa0\x4c\x69\xcc\x1e\xc7\xac" + "\x17\xe0\xe5\xf4\x8c\x05\x64\x34\xfe\x84\x70\xd7\x6b\xed\xab\xf5" + "\x88\x9d\x3e\x4c\x5a\x9e\xd4\x74\xfd\xdd\x91\xd5\xd4\xcb\xbf\xf8" + "\xb7\x56\xb5\xe9\x22\xa6\x6d\x7a\x44\x05\x41\xbf\xdb\x61\x28\xc6" + "\x99\x49\x87\x3d\x28\x77\xf8\x83\x23\x7e\xa9\xa7\xee\x20\xdb\x6d" + "\x21\x50\xb7\xc9\x52\x57\x53\xa3\xcf\xdf\xd0\xf9\xb9\x62\x96\x89" + "\xf5\x5c\xa9\x8a\x11\x95\x01\x25\xc9\x81\x15\x76\xae\xf0\xc7\xc5" + "\x50\xae\x6f\xb5\xd2\x8a\x8e\x9a\xd4\x30\x55\xc6\xe9\x2c\x81\x6e" + "\x95\xf6\x45\x89\x55\x28\x34\x7b\xe5\x72\x9a\x2a\xe2\x98\x09\x35" + "\xe0\xe9\x75\x94\xe9\x34\x95\xb9\x13\x6e\xd5\xa1\x62\x5a\x1c\x94" + "\x28\xed\x84\x46\x76\x6d\x10\x37\x71\xa3\x31\x46\x64\xe4\x59\x44" + "\x17\x70\x1c\x23\xc9\x7e\xf6\xab\x8a\x24\xae\x25\xe2\xb2\x5f\x33" + "\xe4\xd7\xd3\x34\x2a\x49\x22\x16\x15\x9b\x90\x40\xda\x99\xd5\xaf", + .secret_size = 16, + .b_secret_size = 528, + .b_public_size = 512, + .expected_a_public_size = 512, + .expected_ss_size = 512, + .genkey = true, + }, +}; -}, -/* wycheproof - private key == 1 (mod order) on twist */ -{ - .secret = (u8[32]){ 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef, - 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f }, - .b_public = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, - 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, - 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, - 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, - .expected_ss = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, - 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, - 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, - 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, - .secret_size = 32, - .b_public_size = 32, - .expected_ss_size = 32, +static const struct kpp_testvec ffdhe6144_dh_tv_template[] __maybe_unused = { + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x03" /* len */ + "\x00\x03\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x03\x10" /* len */ + "\x00\x00\x03\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x63\x3e\x6f\xe0\xfe\x9f\x4a\x01\x62\x77\xce\xf1\xc7\xcc\x49\x4d" + "\x92\x53\x56\xe3\x39\x15\x81\xb2\xcd\xdc\xaf\x5e\xbf\x31\x1f\x69" + "\xce\x41\x35\x24\xaa\x46\x53\xb5\xb7\x3f\x2b\xad\x95\x14\xfb\xe4" + "\x9a\x61\xcd\x0f\x1f\x02\xee\xa4\x79\x2c\x9d\x1a\x7c\x62\x82\x39" + "\xdd\x43\xcc\x58\x9f\x62\x47\x56\x1d\x0f\xc2\x67\xbc\x24\xd0\xf9" + "\x0a\x50\x1b\x10\xe7\xbb\xd1\xc2\x01\xbb\xc4\x4c\xda\x12\x60\x0e" + 
"\x95\x2b\xde\x09\xd6\x67\xe1\xbc\x4c\xb9\x67\xdf\xd0\x1f\x97\xb4" + "\xde\xcb\x6b\x78\x83\x51\x74\x33\x01\x7f\xf6\x0a\x95\x69\x93\x00" + "\x2a\xc3\x75\x8e\xef\xbe\x53\x11\x6d\xc4\xd0\x9f\x6d\x63\x48\xc1" + "\x91\x1f\x7d\x88\xa7\x90\x78\xd1\x7e\x52\x42\x10\x01\xb4\x27\x95" + "\x91\x43\xcc\x82\x91\x86\x62\xa0\x9d\xef\x65\x6e\x67\xcf\x19\x11" + "\x35\x37\x5e\x94\x97\x83\xa6\x83\x1c\x7e\x8a\x3e\x32\xb0\xce\xff" + "\x20\xdc\x7b\x6e\x18\xd9\x6b\x27\x31\xfc\xc3\xef\x47\x8d\xbe\x34" + "\x2b\xc7\x60\x74\x3c\x93\xb3\x8e\x54\x77\x4e\x73\xe6\x40\x72\x35" + "\xb0\xf0\x06\x53\x43\xbe\xd0\xc3\x87\xcc\x38\x96\xa9\x10\xa0\xd6" + "\x17\xed\xa5\x6a\xf4\xf6\xaa\x77\x40\xed\x7d\x2e\x58\x0f\x5b\x04" + "\x5a\x41\x12\x95\x22\xcb\xa3\xce\x8b\x6d\x6d\x89\xec\x7c\x1d\x25" + "\x27\x52\x50\xa0\x5b\x93\x8c\x5d\x3f\x56\xb9\xa6\x5e\xe5\xf7\x9b" + "\xc7\x9a\x4a\x2e\x79\xb5\xca\x29\x58\x52\xa0\x63\xe4\x9d\xeb\x4c" + "\x4c\xa8\x37\x0b\xe9\xa0\x18\xf1\x86\xf6\x4d\x32\xfb\x9e\x4f\xb3" + "\x7b\x5d\x58\x78\x70\xbd\x56\xac\x99\x75\x25\x71\x66\x76\x4e\x5e" + "\x67\x4f\xb1\x17\xa7\x8b\x55\x12\x87\x01\x4e\xd1\x66\xef\xd0\x70" + "\xaf\x14\x34\xee\x2a\x76\x49\x25\xa6\x2e\x43\x37\x75\x7d\x1a\xad" + "\x08\xd5\x01\x85\x9c\xe1\x20\xd8\x38\x5c\x57\xa5\xed\x9d\x46\x3a" + "\xb7\x46\x60\x29\x8b\xc4\x21\x50\x0a\x30\x9c\x57\x42\xe4\x35\xf8" + "\x12\x5c\x4f\xa2\x20\xc2\xc9\x43\xe3\x6d\x20\xbc\xdf\xb8\x37\x33" + "\x45\x43\x06\x4e\x08\x6f\x8a\xcd\x61\xc3\x1b\x05\x28\x82\xbe\xf0" + "\x48\x33\xe5\x93\xc9\x1a\x61\x16\x67\x03\x9d\x47\x9d\x74\xeb\xae" + "\x13\xf2\xb4\x1b\x09\x11\xf5\x15\xcb\x28\xfd\x50\xe0\xbc\x58\x36" + "\x38\x91\x2c\x07\x27\x1f\x49\x68\xf4\xce\xad\xf7\xba\xec\x5d\x3d" + "\xfd\x27\xe2\xcf\xf4\x56\xfe\x08\xa6\x11\x61\xcb\x6c\x9f\xf9\x3c" + "\x57\x0b\x8b\xaa\x00\x16\x18\xba\x1f\xe8\x4f\x01\xe2\x79\x2a\x0b" + "\xc1\xbd\x52\xef\xe6\xf7\x5a\x66\xfe\x07\x3b\x50\x6b\xbb\xcb\x39" + "\x3c\x94\xf6\x21\x0d\x68\x69\xa4\xed\x2e\xb5\x85\x03\x11\x38\x79" + "\xec\xb5\x22\x23\xdf\x9e\xad\xb4\xbe\xd7\xc7\xdf\xea\x30\x23\x8a" + "\xb7\x21\x0a\x9d\xbd\x99\x13\x7d\x5f\x7e\xaf\x28\x54\x3f\xca\x5e" + "\xf4\xfc\x05\x0d\x65\x67\xd8\xf6\x8e\x90\x9d\x0d\xcf\x62\x82\xd6" + "\x9f\x02\xf8\xca\xfa\x42\x24\x7f\x4d\xb7\xfc\x92\xa6\x4a\x51\xc4" + "\xd8\xae\x19\x87\xc6\xa3\x83\xbe\x7b\x6d\xc3\xf5\xb8\xad\x4a\x05" + "\x78\x84\x3a\x15\x2e\x40\xbe\x79\xa9\xc0\x12\xa1\x48\x39\xc3\xdb" + "\x47\x4f\x7d\xea\x6d\xc7\xfa\x2c\x4e\xe9\xa5\x85\x81\xea\x6c\xcd" + "\x8a\xe5\x74\x17\x76\x31\x31\x75\x96\x83\xca\x81\xbb\x5c\xa9\x79" + "\x2c\xbd\x09\xfe\xe4\x86\x0d\x8c\x76\x9c\xbc\xe8\x93\xe4\xd0\xe4" + "\x0f\xf8\xff\x24\x7e\x66\x61\x69\xfb\xe4\x46\x08\x94\x99\xa5\x53" + "\xd7\xe4\x29\x72\x86\x86\xe8\x1d\x37\xfa\xcb\xd0\x8d\x51\xd0\xbf" + "\x81\xcf\x55\xb9\xc5\x78\x8c\x74\xa0\x16\x3a\xd2\x19\x94\x29\x6a" + "\x5e\xec\xd3\x20\xa0\xb2\xfd\xce\xd4\x14\xa3\x39\x10\xa9\xf4\x4e" + "\xba\x21\x09\x5c\xe6\x61\x43\x51\xae\xc4\x71\xd7\x21\xef\x98\x39", + .b_public = + "\x30\x31\xbe\x43\xd0\x14\x22\x6b\x4b\x8c\x9a\xca\xc6\xdd\xe5\x99" + "\xce\xb8\x30\x23\xb6\xa8\x8c\x4d\xfa\xef\xad\xa6\x6a\x21\x50\xa6" + "\x45\x2d\x19\x2a\x29\x81\xc5\xac\xb4\xa8\x5f\x6d\x5b\xc8\x5f\x12" + "\x35\x21\xfb\x37\xaa\x0c\x79\xeb\xd4\x83\x01\xda\xa3\xf3\x51\x6e" + "\x17\xf9\xef\x3f\xbd\x2f\xd2\x43\x82\x12\x48\xeb\x61\x4c\x8e\xf2" + "\x6c\x76\xf9\x6d\x42\x2a\xcb\x10\x13\x3b\xf6\x9b\xcd\x46\x1e\xa2" + "\xa7\x2c\x08\x56\xd2\x42\xf5\x03\xf0\x3e\xef\xa2\xa2\xf2\x4c\xf2" + "\xdb\x4f\xeb\x40\x15\x53\x27\xf7\xd4\x8e\x58\x23\xf5\x2c\x88\x04" + "\x1e\xb1\xb6\xe3\xd6\x9c\x49\x08\xa1\x4b\xb8\x33\xe4\x75\x85\xa1" + 
"\x86\x97\xce\x1d\xe9\x9f\xe2\xd8\xf2\x7e\xad\xdc\x8a\x4d\xbd\x06" + "\x52\x00\x9a\x2c\x69\xdd\x02\x0c\x69\x5a\xf9\x1d\xfd\xdc\xfb\x82" + "\xb2\xe5\xf3\x24\xba\xd1\x09\x76\x90\xb5\x7a\x92\xa6\x6b\x97\xc0" + "\xce\x13\x9b\x4b\xbc\x30\x91\xb2\x13\x8b\x57\x6c\x8b\x66\x6e\x58" + "\x3e\x91\x50\xc7\x6c\xe1\x18\xec\xbf\x69\xcd\xcb\xa0\xbc\x0d\x05" + "\xc4\xf8\x45\x92\xe0\x05\xd3\x08\xb3\x30\x19\xc8\x80\xf8\x17\x9f" + "\x1e\x6a\x49\x8e\x43\xef\x7a\x49\xa5\x93\xd9\xed\xd1\x07\x03\xe4" + "\xa3\x55\xeb\x1e\x2f\x69\xd7\x40\x8f\x6e\x1c\xb6\x94\xfb\xba\x4e" + "\x46\xd0\x38\x71\x00\x88\x93\x6a\x55\xfc\x16\x95\x1f\xb1\xf6\x2f" + "\x26\x45\x50\x54\x30\x62\x62\xe8\x80\xe5\x24\x0b\xe4\x15\x6b\x32" + "\x16\xc2\x30\x9b\x56\xb4\xc9\x5e\x50\xb4\x27\x82\x86\x01\xda\x68" + "\x44\x4b\x15\x81\x31\x13\x52\xd8\x08\xbc\xae\xf3\xa5\x94\x1c\x81" + "\xe8\x42\xd6\x42\xd6\xff\x99\x58\x0f\x61\x3e\x82\x9e\x2d\x13\x03" + "\x54\x02\x74\xf4\x6b\x43\x43\xce\x54\x44\x36\x3f\x55\xfa\xb2\x56" + "\xdc\xac\xb5\x65\x89\xbe\x36\xd2\x58\x65\x79\x4c\xf3\xe2\x01\xf1" + "\x69\x96\x29\x20\x5d\xee\xf5\x8a\x8b\x9f\x72\xf7\x27\x02\xde\x3b" + "\xc7\x52\x19\xdc\x8e\x22\x36\x09\x14\x59\x07\xbb\x1e\x49\x69\x4f" + "\x00\x7b\x9a\x5d\x23\xe9\xbe\x0d\x52\x90\xa3\x0d\xde\xe7\x80\x57" + "\x53\x69\x39\xe6\xf8\x33\xeb\x92\x0d\x9e\x04\x8b\x16\x16\x16\x1c" + "\xa9\xe6\xe3\x0e\x0a\xc6\xf6\x61\xd1\x44\x2b\x3e\x5e\x02\xfe\xaa" + "\xe3\xf3\x8f\xf9\xc8\x20\x37\xad\xbc\x95\xb8\xc5\xe7\x95\xda\xfb" + "\x80\x5b\xf6\x40\x28\xae\xc1\x4c\x09\xde\xff\x1e\xbf\x51\xd2\xfe" + "\x08\xdc\xb0\x48\x21\xf5\x4c\x43\xdc\x7b\x69\x83\xc8\x69\x5c\xc4" + "\xa9\x98\x76\x4b\xc4\x4a\xac\x1d\xa5\x52\xe3\x35\x43\xdd\x30\xd4" + "\xa0\x51\x9c\xc2\x62\x4c\x7e\xa5\xfb\xd3\x2c\x8a\x09\x7f\x53\xa3" + "\xcd\xca\x58\x1b\x4c\xaf\xba\x21\x8b\x88\x1d\xc0\xe9\x0a\x17\x30" + "\x33\xd6\xa2\xa5\x49\x50\x61\x3b\xff\x37\x71\x66\xef\x61\xbc\xb2" + "\x53\x82\xe5\x70\xef\x32\xff\x9d\x97\xe0\x82\xe0\xbb\x49\xc2\x29" + "\x58\x89\xdd\xe9\x62\x52\xfb\xba\x22\xa6\xd9\x16\xfa\x55\xb3\x06" + "\xed\x6d\x70\x6e\xdc\x47\x7c\x67\x1a\xcc\x27\x98\xd4\xd7\xe6\xf0" + "\xf8\x9f\x51\x3e\xf0\xee\xad\xb6\x78\x69\x71\xb5\xcb\x09\xa3\xa6" + "\x3f\x29\x24\x46\xe0\x65\xbc\x9f\x6c\xe9\xf9\x49\x49\x96\x75\xe5" + "\xe1\xff\x82\x70\xf4\x7e\xff\x8f\xec\x47\x98\x6d\x5b\x88\x60\xee" + "\x43\xb1\xe2\x14\xc1\x49\x95\x74\x46\xd3\x3f\x73\xb2\xe9\x88\xe0" + "\xd3\xb1\xc4\x2c\xef\xee\xdd\x6c\xc5\xa1\x29\xef\x86\xd2\x36\x8a" + "\x2f\x7c\x9d\x28\x0a\x6d\xc9\x5a\xdb\xd4\x04\x06\x36\x96\x09\x03" + "\x71\x5d\x38\x67\xa2\x08\x2a\x04\xe7\xd6\x51\x5a\x19\x9d\xe7\xf1" + "\x5d\x6f\xe2\xff\x48\x37\xb7\x8b\xb1\x14\xb4\x96\xcd\xf0\xa7\xbd" + "\xef\x20\xff\x0a\x8d\x08\xb7\x15\x98\x5a\x13\xd2\xda\x2a\x27\x75", + .expected_a_public = + "\x45\x96\x5a\xb7\x78\x5c\xa4\x4d\x39\xb2\x5f\xc8\xc2\xaa\x1a\xf4" + "\xa6\x68\xf6\x6f\x7e\xa8\x4a\x5b\x0e\xba\x0a\x99\x85\xf9\x63\xd4" + "\x58\x21\x6d\xa8\x3c\xf4\x05\x10\xb0\x0d\x6f\x1c\xa0\x17\x85\xae" + "\x68\xbf\xcc\x00\xc8\x86\x1b\x24\x31\xc9\x49\x23\x91\xe0\x71\x29" + "\x06\x39\x39\x93\x49\x9c\x75\x18\x1a\x8b\x61\x73\x1c\x7f\x37\xd5" + "\xf1\xab\x20\x5e\x62\x25\xeb\x58\xd5\xfa\xc9\x7f\xad\x57\xd5\xcc" + "\x0d\xc1\x7a\x2b\x33\x2a\x76\x84\x33\x26\x97\xcf\x47\x9d\x72\x2a" + "\xc9\x39\xde\xa8\x42\x27\x2d\xdc\xee\x00\x60\xd2\x4f\x13\xe0\xde" + "\xd5\xc7\xf6\x7d\x8b\x2a\x43\x49\x40\x99\xc2\x61\x84\x8e\x57\x09" + "\x7c\xcc\x19\x46\xbd\x4c\xd2\x7c\x7d\x02\x4d\x88\xdf\x58\x24\x80" + "\xeb\x19\x3b\x2a\x13\x2b\x19\x85\x3c\xd8\x31\x03\x00\xa4\xd4\x57" + "\x23\x2c\x24\x37\xb3\x62\xea\x35\x29\xd0\x2c\xac\xfd\xbd\xdf\x3d" + 
"\xa6\xce\xfa\x0d\x5b\xb6\x15\x8b\xe3\x58\xe9\xad\x99\x87\x29\x51" + "\x8d\x97\xd7\xa9\x55\xf0\x72\x6e\x4e\x58\xcb\x2b\x4d\xbd\xd0\x48" + "\x7d\x14\x86\xdb\x3f\xa2\x5f\x6e\x35\x4a\xe1\x70\xb1\x53\x72\xb7" + "\xbc\xe9\x3d\x1b\x33\xc0\x54\x6f\x43\x55\x76\x85\x7f\x9b\xa5\xb3" + "\xc1\x1d\xd3\xfe\xe2\xd5\x96\x3d\xdd\x92\x04\xb1\xad\x75\xdb\x13" + "\x4e\x49\xfc\x35\x34\xc5\xda\x13\x98\xb8\x12\xbe\xda\x90\x55\x7c" + "\x11\x6c\xbe\x2b\x8c\x51\x29\x23\xc1\x51\xbc\x0c\x1c\xe2\x20\xfc" + "\xfe\xf2\xaa\x71\x9b\x21\xdf\x25\x1f\x68\x21\x7e\xe1\xc9\x87\xa0" + "\x20\xf6\x8d\x4f\x27\x8c\x3c\x0f\x9d\xf4\x69\x25\xaa\x49\xab\x94" + "\x22\x5a\x92\x3a\xba\xb4\xc2\x8c\x5a\xaa\x04\xbf\x46\xc5\xaa\x93" + "\xab\x0d\xe9\x54\x6c\x3a\x64\xa6\xa2\x21\x66\xee\x1c\x10\x21\x84" + "\xf2\x9e\xcc\x57\xac\xc2\x25\x62\xad\xbb\x59\xef\x25\x61\x6c\x81" + "\x38\x8a\xdc\x8c\xeb\x7b\x18\x1d\xaf\xa9\xc5\x9a\xf4\x49\x26\x8a" + "\x25\xc4\x3e\x31\x95\x28\xef\xf7\x72\xe9\xc5\xaa\x59\x72\x2b\x67" + "\x47\xe8\x6b\x51\x05\x24\xb8\x18\xb3\x34\x0f\x8c\x2b\x80\xba\x61" + "\x1c\xbe\x9e\x9a\x7c\xe3\x60\x5e\x49\x02\xff\x50\x8a\x64\x28\x64" + "\x46\x7b\x83\x14\x72\x6e\x59\x9b\x56\x09\xb4\xf0\xde\x52\xc3\xf3" + "\x58\x17\x6a\xae\xb1\x0f\xf4\x39\xcc\xd8\xce\x4d\xe1\x51\x17\x88" + "\xe4\x98\xd9\xd1\xa9\x55\xbc\xbf\x7e\xc4\x51\x96\xdb\x44\x1d\xcd" + "\x8d\x74\xad\xa7\x8f\x87\x83\x75\xfc\x36\xb7\xd2\xd4\x89\x16\x97" + "\xe4\xc6\x2a\xe9\x65\xc8\xca\x1c\xbd\x86\xaf\x57\x80\xf7\xdd\x42" + "\xc0\x3b\x3f\x87\x51\x02\x2f\xf8\xd8\x68\x0f\x3d\x95\x2d\xf1\x67" + "\x09\xa6\x5d\x0b\x7e\x01\xb4\xb2\x32\x01\xa8\xd0\x58\x0d\xe6\xa2" + "\xd8\x4b\x22\x10\x7d\x11\xf3\xc2\x4e\xb8\x43\x8e\x31\x79\x59\xe2" + "\xc4\x96\x29\x17\x40\x06\x0d\xdf\xdf\xc3\x02\x30\x2a\xd1\x8e\xf2" + "\xee\x2d\xd2\x12\x63\x5a\x1d\x3c\xba\x4a\xc4\x56\x90\xc6\x12\x0b" + "\xe0\x04\x3f\x35\x59\x8e\x40\x75\xf4\x4c\x10\x61\xb9\x30\x89\x7c" + "\x8d\x0e\x25\xb7\x5a\x6b\x97\x05\xc6\x37\x80\x6e\x94\x56\xa8\x5f" + "\x03\x94\x59\xc8\xc5\x3e\xdc\x23\xe5\x68\x4f\xd7\xbb\x6d\x7e\xc1" + "\x8d\xf9\xcc\x3f\x38\xad\x77\xb3\x18\x61\xed\x04\xc0\x71\xa7\x96" + "\xb1\xaf\x1d\x69\x78\xda\x6d\x89\x8b\x50\x75\x99\x44\xb3\xb2\x75" + "\xd1\xc8\x14\x40\xa1\x0a\xbf\xc4\x45\xc4\xee\x12\x90\x76\x26\x64" + "\xb7\x73\x2e\x0b\x0c\xfa\xc3\x55\x29\x24\x1b\x7a\x00\x27\x07\x26" + "\x36\xf0\x38\x1a\xe3\xb7\xc4\x8d\x1c\x9c\xa9\xc0\xc1\x45\x91\x9e" + "\x86\xdd\x82\x94\x45\xfa\xcd\x5a\x19\x12\x7d\xef\xda\x17\xad\x21" + "\x17\x89\x8b\x45\xa7\xf5\xed\x51\x9e\x58\x13\xdc\x84\xa4\xe6\x37", + .expected_ss = + "\x9a\x9c\x1c\xb7\x73\x2f\xf2\x12\xed\x59\x01\xbb\x75\xf7\xf5\xe4" + "\xa0\xa8\xbc\x3f\x3f\xb6\xf7\x74\x6e\xc4\xba\x6d\x6c\x4d\x93\x31" + "\x2b\xa7\xa4\xb3\x47\x8f\x77\x04\xb5\xa5\xab\xca\x6b\x5a\xe2\x86" + "\x02\x60\xca\xb4\xd7\x5e\xe0\x0f\x73\xdd\xa2\x38\x7c\xae\x0f\x5a" + "\x1a\xd7\xfd\xb6\xc8\x6f\xdd\xe0\x98\xd5\x07\xea\x1f\x2a\xbb\x9e" + "\xef\x01\x24\x04\xee\xf5\x89\xb1\x12\x26\x54\x95\xef\xcb\x84\xe9" + "\xae\x05\xef\x63\x25\x15\x65\x79\x79\x79\x91\xc3\x76\x72\xb4\x85" + "\x86\xd9\xd3\x03\xb0\xff\x04\x96\x05\x3c\xde\xbf\x47\x34\x76\x70" + "\x17\xd2\x24\x83\xb9\xbb\xcf\x70\x7c\xb8\xc6\x7b\x4e\x01\x86\x36" + "\xc7\xc5\xe5\x8b\x7c\x69\x74\x9a\xfe\x1f\x58\x85\x0f\x00\xf8\x4e" + "\xf1\x56\xdc\xd1\x11\x28\x2c\xcf\x6c\xb9\xc9\x57\x17\x2e\x19\x19" + "\x55\xb3\x4c\xd8\xfb\xe7\x6f\x70\x63\xf9\x53\x45\xdd\xd5\x62\x95" + "\xd3\x7d\x7e\xa0\x00\x1a\x62\x9f\x96\x0a\x5d\x0a\x25\x02\xbb\xff" + "\x5a\xe8\x9e\x5a\x66\x08\x93\xbc\x92\xaf\xd2\x28\x04\x97\xc1\x54" + "\xfe\xcc\x0a\x25\xa2\xf4\x1d\x5a\x9a\xb1\x3e\x9c\xba\x78\xe2\xcf" + 
"\x71\x70\xe3\x40\xea\xba\x69\x9b\x03\xdd\x99\x26\x09\x84\x9d\x69" + "\x4d\x3d\x0b\xe9\x3f\x51\xcd\x05\xe5\x00\xaf\x2c\xd3\xf6\xc0\x68" + "\xb5\x23\x53\x33\x14\xbd\x39\x1c\xbd\x1b\xe6\x72\x90\xcc\xc2\x86" + "\x1a\x42\x83\x55\xb3\xed\x0b\x62\x6d\x0e\xbb\x9e\x2a\x42\x32\x05" + "\x3f\xf2\x2c\xc8\x9f\x3c\xd2\xb1\x0b\xb6\x4c\xa0\x22\x36\xee\xb9" + "\x55\x23\x3e\x80\xc7\x28\x7c\x39\x11\xd3\x4a\x96\x2e\xef\x52\x34" + "\xf2\xda\xb1\xc6\xf5\x02\x10\xbf\x56\x6b\x50\x56\xcd\x2c\xfe\xe1" + "\x94\x14\x19\x24\x6e\x9a\xdf\x0c\xb8\xe2\xb8\xd5\xa3\xc1\x22\x8e" + "\x84\x92\x00\x16\xf1\x3f\x83\xf6\x36\x31\xa5\x38\xc6\xcf\xf8\x9b" + "\x03\xc7\x6f\xb9\xa1\x04\xdf\x20\x0f\x0b\x0f\x70\xff\x57\x36\x7f" + "\xb3\x6b\xcb\x8f\x48\xf7\xb2\xdb\x85\x05\xd1\xfe\x34\x05\xf6\x57" + "\xb4\x5b\xcc\x3f\x0e\xba\x36\x59\xb0\xfd\x4d\xf6\xf4\x5e\xd2\x65" + "\x1d\x98\x87\xb4\x5e\xff\x29\xaa\x84\x9b\x44\x0f\x06\x36\x61\xbd" + "\xdb\x51\xda\x56\xc2\xd6\x19\xe2\x57\x4f\xd0\x29\x71\xc8\xe4\xd6" + "\xfb\x8c\xd0\xfc\x4f\x25\x09\xa6\xfc\x67\xe2\xb8\xac\xd3\x88\x8f" + "\x1f\xf6\xa1\xe3\x45\xa6\x34\xe3\xb1\x6b\xb7\x37\x0e\x06\xc7\x63" + "\xde\xac\x3b\xac\x07\x91\x64\xcc\x12\x10\x46\x85\x14\x0b\x6b\x03" + "\xba\x4a\x85\xae\xc5\x8c\xa5\x9d\x36\x38\x33\xca\x42\x9c\x4b\x0c" + "\x46\xe1\x77\xe9\x1f\x80\xfe\xb7\x1d\x5a\xf4\xc6\x11\x26\x78\xea" + "\x81\x25\x77\x47\xed\x8b\x59\xc2\x6b\x49\xff\x83\x56\xec\xa5\xf0" + "\xe0\x8b\x15\xd4\x99\x40\x2a\x65\x2a\x98\xf4\x71\x35\x63\x84\x08" + "\x4d\xcd\x71\x85\x55\xbc\xa4\x1c\x90\x93\x03\x41\xde\xed\x78\x62" + "\x07\x30\x50\xac\x60\x21\x06\xc3\xab\xa4\x04\xc0\xc2\x32\x07\xc4" + "\x1f\x2f\xec\xe2\x32\xbf\xbe\x5e\x50\x5b\x2a\x19\x71\x44\x37\x76" + "\x8b\xbc\xdb\x73\x98\x65\x78\xc9\x33\x97\x7e\xdc\x60\xa8\x87\xf2" + "\xb5\x96\x55\x7f\x44\x07\xcb\x3b\xf3\xd7\x82\xfd\x77\x21\x82\x21" + "\x1a\x8b\xa2\xf5\x1f\x66\xd0\x57\x00\x4f\xa9\xa5\x33\xb8\x69\x91" + "\xe8\x2e\xf7\x73\x47\x89\x30\x9b\xb1\xfd\xe1\x5d\x11\xfd\x84\xd9" + "\xa2\x91\x1f\x8a\xa7\x7a\x77\x8e\x3b\x10\x1d\x0a\x59\x50\x34\xb0" + "\xc3\x90\x9f\x56\xb7\x43\xeb\x51\x99\x2b\x8e\x6d\x7b\x58\xe7\xc0" + "\x7f\x3d\xa0\x27\x50\xf2\x6e\xc8\x1e\x7f\x84\xb3\xe1\xf7\x09\x85" + "\xd2\x9b\x56\x6b\xba\xa5\x19\x2e\xec\xd8\x5c\xf5\x4e\x43\x36\x2e" + "\x89\x85\x41\x7f\x9c\x91\x2e\x62\xc3\x41\xcf\x0e\xa1\x7f\xeb\x50", + .secret_size = 784, + .b_public_size = 768, + .expected_a_public_size = 768, + .expected_ss_size = 768, + }, + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x00" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#else + "\x00\x01" /* type */ + "\x00\x10" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#endif + .b_secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x03" /* len */ + "\x00\x03\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x03\x10" /* len */ + "\x00\x00\x03\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x63\x3e\x6f\xe0\xfe\x9f\x4a\x01\x62\x77\xce\xf1\xc7\xcc\x49\x4d" + "\x92\x53\x56\xe3\x39\x15\x81\xb2\xcd\xdc\xaf\x5e\xbf\x31\x1f\x69" + "\xce\x41\x35\x24\xaa\x46\x53\xb5\xb7\x3f\x2b\xad\x95\x14\xfb\xe4" + "\x9a\x61\xcd\x0f\x1f\x02\xee\xa4\x79\x2c\x9d\x1a\x7c\x62\x82\x39" + "\xdd\x43\xcc\x58\x9f\x62\x47\x56\x1d\x0f\xc2\x67\xbc\x24\xd0\xf9" + "\x0a\x50\x1b\x10\xe7\xbb\xd1\xc2\x01\xbb\xc4\x4c\xda\x12\x60\x0e" + 
"\x95\x2b\xde\x09\xd6\x67\xe1\xbc\x4c\xb9\x67\xdf\xd0\x1f\x97\xb4" + "\xde\xcb\x6b\x78\x83\x51\x74\x33\x01\x7f\xf6\x0a\x95\x69\x93\x00" + "\x2a\xc3\x75\x8e\xef\xbe\x53\x11\x6d\xc4\xd0\x9f\x6d\x63\x48\xc1" + "\x91\x1f\x7d\x88\xa7\x90\x78\xd1\x7e\x52\x42\x10\x01\xb4\x27\x95" + "\x91\x43\xcc\x82\x91\x86\x62\xa0\x9d\xef\x65\x6e\x67\xcf\x19\x11" + "\x35\x37\x5e\x94\x97\x83\xa6\x83\x1c\x7e\x8a\x3e\x32\xb0\xce\xff" + "\x20\xdc\x7b\x6e\x18\xd9\x6b\x27\x31\xfc\xc3\xef\x47\x8d\xbe\x34" + "\x2b\xc7\x60\x74\x3c\x93\xb3\x8e\x54\x77\x4e\x73\xe6\x40\x72\x35" + "\xb0\xf0\x06\x53\x43\xbe\xd0\xc3\x87\xcc\x38\x96\xa9\x10\xa0\xd6" + "\x17\xed\xa5\x6a\xf4\xf6\xaa\x77\x40\xed\x7d\x2e\x58\x0f\x5b\x04" + "\x5a\x41\x12\x95\x22\xcb\xa3\xce\x8b\x6d\x6d\x89\xec\x7c\x1d\x25" + "\x27\x52\x50\xa0\x5b\x93\x8c\x5d\x3f\x56\xb9\xa6\x5e\xe5\xf7\x9b" + "\xc7\x9a\x4a\x2e\x79\xb5\xca\x29\x58\x52\xa0\x63\xe4\x9d\xeb\x4c" + "\x4c\xa8\x37\x0b\xe9\xa0\x18\xf1\x86\xf6\x4d\x32\xfb\x9e\x4f\xb3" + "\x7b\x5d\x58\x78\x70\xbd\x56\xac\x99\x75\x25\x71\x66\x76\x4e\x5e" + "\x67\x4f\xb1\x17\xa7\x8b\x55\x12\x87\x01\x4e\xd1\x66\xef\xd0\x70" + "\xaf\x14\x34\xee\x2a\x76\x49\x25\xa6\x2e\x43\x37\x75\x7d\x1a\xad" + "\x08\xd5\x01\x85\x9c\xe1\x20\xd8\x38\x5c\x57\xa5\xed\x9d\x46\x3a" + "\xb7\x46\x60\x29\x8b\xc4\x21\x50\x0a\x30\x9c\x57\x42\xe4\x35\xf8" + "\x12\x5c\x4f\xa2\x20\xc2\xc9\x43\xe3\x6d\x20\xbc\xdf\xb8\x37\x33" + "\x45\x43\x06\x4e\x08\x6f\x8a\xcd\x61\xc3\x1b\x05\x28\x82\xbe\xf0" + "\x48\x33\xe5\x93\xc9\x1a\x61\x16\x67\x03\x9d\x47\x9d\x74\xeb\xae" + "\x13\xf2\xb4\x1b\x09\x11\xf5\x15\xcb\x28\xfd\x50\xe0\xbc\x58\x36" + "\x38\x91\x2c\x07\x27\x1f\x49\x68\xf4\xce\xad\xf7\xba\xec\x5d\x3d" + "\xfd\x27\xe2\xcf\xf4\x56\xfe\x08\xa6\x11\x61\xcb\x6c\x9f\xf9\x3c" + "\x57\x0b\x8b\xaa\x00\x16\x18\xba\x1f\xe8\x4f\x01\xe2\x79\x2a\x0b" + "\xc1\xbd\x52\xef\xe6\xf7\x5a\x66\xfe\x07\x3b\x50\x6b\xbb\xcb\x39" + "\x3c\x94\xf6\x21\x0d\x68\x69\xa4\xed\x2e\xb5\x85\x03\x11\x38\x79" + "\xec\xb5\x22\x23\xdf\x9e\xad\xb4\xbe\xd7\xc7\xdf\xea\x30\x23\x8a" + "\xb7\x21\x0a\x9d\xbd\x99\x13\x7d\x5f\x7e\xaf\x28\x54\x3f\xca\x5e" + "\xf4\xfc\x05\x0d\x65\x67\xd8\xf6\x8e\x90\x9d\x0d\xcf\x62\x82\xd6" + "\x9f\x02\xf8\xca\xfa\x42\x24\x7f\x4d\xb7\xfc\x92\xa6\x4a\x51\xc4" + "\xd8\xae\x19\x87\xc6\xa3\x83\xbe\x7b\x6d\xc3\xf5\xb8\xad\x4a\x05" + "\x78\x84\x3a\x15\x2e\x40\xbe\x79\xa9\xc0\x12\xa1\x48\x39\xc3\xdb" + "\x47\x4f\x7d\xea\x6d\xc7\xfa\x2c\x4e\xe9\xa5\x85\x81\xea\x6c\xcd" + "\x8a\xe5\x74\x17\x76\x31\x31\x75\x96\x83\xca\x81\xbb\x5c\xa9\x79" + "\x2c\xbd\x09\xfe\xe4\x86\x0d\x8c\x76\x9c\xbc\xe8\x93\xe4\xd0\xe4" + "\x0f\xf8\xff\x24\x7e\x66\x61\x69\xfb\xe4\x46\x08\x94\x99\xa5\x53" + "\xd7\xe4\x29\x72\x86\x86\xe8\x1d\x37\xfa\xcb\xd0\x8d\x51\xd0\xbf" + "\x81\xcf\x55\xb9\xc5\x78\x8c\x74\xa0\x16\x3a\xd2\x19\x94\x29\x6a" + "\x5e\xec\xd3\x20\xa0\xb2\xfd\xce\xd4\x14\xa3\x39\x10\xa9\xf4\x4e" + "\xba\x21\x09\x5c\xe6\x61\x43\x51\xae\xc4\x71\xd7\x21\xef\x98\x39", + .b_public = + "\x45\x96\x5a\xb7\x78\x5c\xa4\x4d\x39\xb2\x5f\xc8\xc2\xaa\x1a\xf4" + "\xa6\x68\xf6\x6f\x7e\xa8\x4a\x5b\x0e\xba\x0a\x99\x85\xf9\x63\xd4" + "\x58\x21\x6d\xa8\x3c\xf4\x05\x10\xb0\x0d\x6f\x1c\xa0\x17\x85\xae" + "\x68\xbf\xcc\x00\xc8\x86\x1b\x24\x31\xc9\x49\x23\x91\xe0\x71\x29" + "\x06\x39\x39\x93\x49\x9c\x75\x18\x1a\x8b\x61\x73\x1c\x7f\x37\xd5" + "\xf1\xab\x20\x5e\x62\x25\xeb\x58\xd5\xfa\xc9\x7f\xad\x57\xd5\xcc" + "\x0d\xc1\x7a\x2b\x33\x2a\x76\x84\x33\x26\x97\xcf\x47\x9d\x72\x2a" + "\xc9\x39\xde\xa8\x42\x27\x2d\xdc\xee\x00\x60\xd2\x4f\x13\xe0\xde" + "\xd5\xc7\xf6\x7d\x8b\x2a\x43\x49\x40\x99\xc2\x61\x84\x8e\x57\x09" + 
"\x7c\xcc\x19\x46\xbd\x4c\xd2\x7c\x7d\x02\x4d\x88\xdf\x58\x24\x80" + "\xeb\x19\x3b\x2a\x13\x2b\x19\x85\x3c\xd8\x31\x03\x00\xa4\xd4\x57" + "\x23\x2c\x24\x37\xb3\x62\xea\x35\x29\xd0\x2c\xac\xfd\xbd\xdf\x3d" + "\xa6\xce\xfa\x0d\x5b\xb6\x15\x8b\xe3\x58\xe9\xad\x99\x87\x29\x51" + "\x8d\x97\xd7\xa9\x55\xf0\x72\x6e\x4e\x58\xcb\x2b\x4d\xbd\xd0\x48" + "\x7d\x14\x86\xdb\x3f\xa2\x5f\x6e\x35\x4a\xe1\x70\xb1\x53\x72\xb7" + "\xbc\xe9\x3d\x1b\x33\xc0\x54\x6f\x43\x55\x76\x85\x7f\x9b\xa5\xb3" + "\xc1\x1d\xd3\xfe\xe2\xd5\x96\x3d\xdd\x92\x04\xb1\xad\x75\xdb\x13" + "\x4e\x49\xfc\x35\x34\xc5\xda\x13\x98\xb8\x12\xbe\xda\x90\x55\x7c" + "\x11\x6c\xbe\x2b\x8c\x51\x29\x23\xc1\x51\xbc\x0c\x1c\xe2\x20\xfc" + "\xfe\xf2\xaa\x71\x9b\x21\xdf\x25\x1f\x68\x21\x7e\xe1\xc9\x87\xa0" + "\x20\xf6\x8d\x4f\x27\x8c\x3c\x0f\x9d\xf4\x69\x25\xaa\x49\xab\x94" + "\x22\x5a\x92\x3a\xba\xb4\xc2\x8c\x5a\xaa\x04\xbf\x46\xc5\xaa\x93" + "\xab\x0d\xe9\x54\x6c\x3a\x64\xa6\xa2\x21\x66\xee\x1c\x10\x21\x84" + "\xf2\x9e\xcc\x57\xac\xc2\x25\x62\xad\xbb\x59\xef\x25\x61\x6c\x81" + "\x38\x8a\xdc\x8c\xeb\x7b\x18\x1d\xaf\xa9\xc5\x9a\xf4\x49\x26\x8a" + "\x25\xc4\x3e\x31\x95\x28\xef\xf7\x72\xe9\xc5\xaa\x59\x72\x2b\x67" + "\x47\xe8\x6b\x51\x05\x24\xb8\x18\xb3\x34\x0f\x8c\x2b\x80\xba\x61" + "\x1c\xbe\x9e\x9a\x7c\xe3\x60\x5e\x49\x02\xff\x50\x8a\x64\x28\x64" + "\x46\x7b\x83\x14\x72\x6e\x59\x9b\x56\x09\xb4\xf0\xde\x52\xc3\xf3" + "\x58\x17\x6a\xae\xb1\x0f\xf4\x39\xcc\xd8\xce\x4d\xe1\x51\x17\x88" + "\xe4\x98\xd9\xd1\xa9\x55\xbc\xbf\x7e\xc4\x51\x96\xdb\x44\x1d\xcd" + "\x8d\x74\xad\xa7\x8f\x87\x83\x75\xfc\x36\xb7\xd2\xd4\x89\x16\x97" + "\xe4\xc6\x2a\xe9\x65\xc8\xca\x1c\xbd\x86\xaf\x57\x80\xf7\xdd\x42" + "\xc0\x3b\x3f\x87\x51\x02\x2f\xf8\xd8\x68\x0f\x3d\x95\x2d\xf1\x67" + "\x09\xa6\x5d\x0b\x7e\x01\xb4\xb2\x32\x01\xa8\xd0\x58\x0d\xe6\xa2" + "\xd8\x4b\x22\x10\x7d\x11\xf3\xc2\x4e\xb8\x43\x8e\x31\x79\x59\xe2" + "\xc4\x96\x29\x17\x40\x06\x0d\xdf\xdf\xc3\x02\x30\x2a\xd1\x8e\xf2" + "\xee\x2d\xd2\x12\x63\x5a\x1d\x3c\xba\x4a\xc4\x56\x90\xc6\x12\x0b" + "\xe0\x04\x3f\x35\x59\x8e\x40\x75\xf4\x4c\x10\x61\xb9\x30\x89\x7c" + "\x8d\x0e\x25\xb7\x5a\x6b\x97\x05\xc6\x37\x80\x6e\x94\x56\xa8\x5f" + "\x03\x94\x59\xc8\xc5\x3e\xdc\x23\xe5\x68\x4f\xd7\xbb\x6d\x7e\xc1" + "\x8d\xf9\xcc\x3f\x38\xad\x77\xb3\x18\x61\xed\x04\xc0\x71\xa7\x96" + "\xb1\xaf\x1d\x69\x78\xda\x6d\x89\x8b\x50\x75\x99\x44\xb3\xb2\x75" + "\xd1\xc8\x14\x40\xa1\x0a\xbf\xc4\x45\xc4\xee\x12\x90\x76\x26\x64" + "\xb7\x73\x2e\x0b\x0c\xfa\xc3\x55\x29\x24\x1b\x7a\x00\x27\x07\x26" + "\x36\xf0\x38\x1a\xe3\xb7\xc4\x8d\x1c\x9c\xa9\xc0\xc1\x45\x91\x9e" + "\x86\xdd\x82\x94\x45\xfa\xcd\x5a\x19\x12\x7d\xef\xda\x17\xad\x21" + "\x17\x89\x8b\x45\xa7\xf5\xed\x51\x9e\x58\x13\xdc\x84\xa4\xe6\x37", + .secret_size = 16, + .b_secret_size = 784, + .b_public_size = 768, + .expected_a_public_size = 768, + .expected_ss_size = 768, + .genkey = true, + }, +}; -} +static const struct kpp_testvec ffdhe8192_dh_tv_template[] __maybe_unused = { + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x04" /* len */ + "\x00\x04\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x04\x10" /* len */ + "\x00\x00\x04\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x76\x6e\xeb\xf9\xeb\x76\xae\x37\xcb\x19\x49\x8b\xeb\xaf\xb0\x4b" + "\x6d\xe9\x15\xad\xda\xf2\xef\x58\xe9\xd6\xdd\x4c\xb3\x56\xd0\x3b" + "\x00\xb0\x65\xed\xae\xe0\x2e\xdf\x8f\x45\x3f\x3c\x5d\x2f\xfa\x96" + 
"\x36\x33\xb2\x01\x8b\x0f\xe8\x46\x15\x6d\x60\x5b\xec\x32\xc3\x3b" + "\x06\xf3\xb4\x1b\x9a\xef\x3c\x03\x0e\xcc\xce\x1d\x24\xa0\xc9\x08" + "\x65\xf9\x45\xe5\xd2\x43\x08\x88\x58\xd6\x46\xe7\xbb\x25\xac\xed" + "\x3b\xac\x6f\x5e\xfb\xd6\x19\xa6\x20\x3a\x1d\x0c\xe8\x00\x72\x54" + "\xd7\xd9\xc9\x26\x49\x18\xc6\xb8\xbc\xdd\xf3\xce\xf3\x7b\x69\x04" + "\x5c\x6f\x11\xdb\x44\x42\x72\xb6\xb7\x84\x17\x86\x47\x3f\xc5\xa1" + "\xd8\x86\xef\xe2\x27\x49\x2b\x8f\x3e\x91\x12\xd9\x45\x96\xf7\xe6" + "\x77\x76\x36\x58\x71\x9a\xb1\xdb\xcf\x24\x9e\x7e\xad\xce\x45\xba" + "\xb5\xec\x8e\xb9\xd6\x7b\x3d\x76\xa4\x85\xad\xd8\x49\x9b\x80\x9d" + "\x7f\x9f\x85\x09\x9e\x86\x5b\x6b\xf3\x8d\x39\x5e\x6f\xe4\x30\xc8" + "\xa5\xf3\xdf\x68\x73\x6b\x2e\x9a\xcb\xac\x0a\x0d\x44\xc1\xaf\xb2" + "\x11\x1b\x7c\x43\x08\x44\x43\xe2\x4e\xfd\x93\x30\x99\x09\x12\xbb" + "\xf6\x31\x34\xa5\x3d\x45\x98\xee\xd7\x2a\x1a\x89\xf5\x37\x92\x33" + "\xa0\xdd\xf5\xfb\x1f\x90\x42\x55\x5a\x0b\x82\xff\xf0\x96\x92\x15" + "\x65\x5a\x55\x96\xca\x1b\xd5\xe5\xb5\x94\xde\x2e\xa6\x03\x57\x9e" + "\x15\xe4\x32\x2b\x1f\xb2\x22\x21\xe9\xa0\x05\xd3\x65\x6c\x11\x66" + "\x25\x38\xbb\xa3\x6c\xc2\x0b\x2b\xd0\x7a\x20\x26\x29\x37\x5d\x5f" + "\xd8\xff\x2a\xcd\x46\x6c\xd6\x6e\xe5\x77\x1a\xe6\x33\xf1\x8e\xc8" + "\x10\x30\x11\x00\x27\xf9\x7d\x0e\x28\x43\xa7\x67\x38\x7f\x16\xda" + "\xd0\x01\x8e\xa4\xe8\x6f\xcd\x23\xaf\x77\x52\x34\xad\x7e\xc3\xed" + "\x2d\x10\x0a\x33\xdc\xcf\x1b\x88\x0f\xcc\x48\x7f\x42\xf0\x9e\x13" + "\x1f\xf5\xd1\xe9\x90\x87\xbd\xfa\x5f\x1d\x77\x55\xcb\xc3\x05\xaf" + "\x71\xd0\xe0\xab\x46\x31\xd7\xea\x89\x54\x2d\x39\xaf\xf6\x4f\x74" + "\xaf\x46\x58\x89\x78\x95\x2e\xe6\x90\xb7\xaa\x00\x73\x9f\xed\xb9" + "\x00\xd6\xf6\x6d\x26\x59\xcd\x56\xdb\xf7\x3d\x5f\xeb\x6e\x46\x33" + "\xb1\x23\xed\x9f\x8d\x58\xdc\xb4\x28\x3b\x90\x09\xc4\x61\x02\x1f" + "\xf8\x62\xf2\x6e\xc1\x94\x71\x66\x93\x11\xdf\xaa\x3e\xd7\xb5\xe5" + "\xc1\x78\xe9\x14\xcd\x55\x16\x51\xdf\x8d\xd0\x94\x8c\x43\xe9\xb8" + "\x1d\x42\x7f\x76\xbc\x6f\x87\x42\x88\xde\xd7\x52\x78\x00\x4f\x18" + "\x02\xe7\x7b\xe2\x8a\xc3\xd1\x43\xa5\xac\xda\xb0\x8d\x19\x96\xd4" + "\x81\xe0\x75\xe9\xca\x41\x7e\x1f\x93\x0b\x26\x24\xb3\xaa\xdd\x10" + "\x20\xd3\xf2\x9f\x3f\xdf\x65\xde\x67\x79\xdc\x76\x9f\x3c\x72\x75" + "\x65\x8a\x30\xcc\xd2\xcc\x06\xb1\xab\x62\x86\x78\x5d\xb8\xce\x72" + "\xb3\x12\xc7\x9f\x07\xd0\x6b\x98\x82\x9b\x6c\xbb\x15\xe5\xcc\xf4" + "\xc8\xf4\x60\x81\xdc\xd3\x09\x1b\x5e\xd4\xf3\x55\xcf\x1c\x16\x83" + "\x61\xb4\x2e\xcc\x08\x67\x58\xfd\x46\x64\xbc\x29\x4b\xdd\xda\xec" + "\xdc\xc6\xa9\xa5\x73\xfb\xf8\xf3\xaf\x89\xa8\x9e\x25\x14\xfa\xac" + "\xeb\x1c\x7c\x80\x96\x66\x4d\x41\x67\x9b\x07\x4f\x0a\x97\x17\x1c" + "\x4d\x61\xc7\x2e\x6f\x36\x98\x29\x50\x39\x6d\xe7\x70\xda\xf0\xc8" + "\x05\x80\x7b\x32\xff\xfd\x12\xde\x61\x0d\xf9\x4c\x21\xf1\x56\x72" + "\x3d\x61\x46\xc0\x2d\x07\xd1\x6c\xd3\xbe\x9a\x21\x83\x85\xf7\xed" + "\x53\x95\x44\x40\x8f\x75\x12\x18\xc2\x9a\xfd\x5e\xce\x66\xa6\x7f" + "\x57\xc0\xd7\x73\x76\xb3\x13\xda\x2e\x58\xc6\x27\x40\xb2\x2d\xef" + "\x7d\x72\xb4\xa8\x75\x6f\xcc\x5f\x42\x3e\x2c\x90\x36\x59\xa0\x34" + "\xaa\xce\xbc\x04\x4c\xe6\x56\xc2\xcd\xa6\x1c\x59\x04\x56\x53\xcf" + "\x6d\xd7\xf0\xb1\x4f\x91\xfa\x84\xcf\x4b\x8d\x50\x4c\xf8\x2a\x31" + "\x5f\xe3\xba\x79\xb4\xcc\x59\x64\xe3\x7a\xfa\xf6\x06\x9d\x04\xbb" + "\xce\x61\xbf\x9e\x59\x0a\x09\x51\x6a\xbb\x0b\x80\xe0\x91\xc1\x51" + "\x04\x58\x67\x67\x4b\x42\x4f\x95\x68\x75\xe2\x1f\x9c\x14\x70\xfd" + "\x3a\x8a\xce\x8b\x04\xa1\x89\xe7\xb4\xbf\x70\xfe\xf3\x0c\x48\x04" + "\x3a\xd2\x85\x68\x03\xe7\xfa\xec\x5b\x55\xb7\x95\xfd\x5b\x19\x35" + 
"\xad\xcb\x4a\x63\x03\x44\x64\x2a\x48\x59\x9a\x26\x43\x96\x8c\xe6" + "\xbd\xb7\x90\xd4\x5f\x8d\x08\x28\xa8\xc5\x89\x70\xb9\x6e\xd3\x3b" + "\x76\x0e\x37\x98\x15\x27\xca\xc9\xb0\xe0\xfd\xf3\xc6\xdf\x69\xce" + "\xe1\x5f\x6a\x3e\x5c\x86\xe2\x58\x41\x11\xf0\x7e\x56\xec\xe4\xc9" + "\x0d\x87\x91\xfb\xb9\xc8\x0d\x34\xab\xb0\xc6\xf2\xa6\x00\x7b\x18" + "\x92\xf4\x43\x7f\x01\x85\x2e\xef\x8c\x72\x50\x10\xdb\xf1\x37\x62" + "\x16\x85\x71\x01\xa8\x2b\xf0\x13\xd3\x7c\x0b\xaf\xf1\xf3\xd1\xee" + "\x90\x41\x5f\x7d\x5b\xa9\x83\x4b\xfa\x80\x59\x50\x73\xe1\xc4\xf9" + "\x5e\x4b\xde\xd9\xf5\x22\x68\x5e\x65\xd9\x37\xe4\x1a\x08\x0e\xb1" + "\x28\x2f\x40\x9e\x37\xa8\x12\x56\xb7\xb8\x64\x94\x68\x94\xff\x9f", + .b_public = + "\x26\xa8\x3a\x97\xe0\x52\x76\x07\x26\xa7\xbb\x21\xfd\xe5\x69\xde" + "\xe6\xe0\xb5\xa0\xf1\xaa\x51\x2b\x56\x1c\x3c\x6c\xe5\x9f\x8f\x75" + "\x71\x04\x86\xf6\x43\x2f\x20\x7f\x45\x4f\x5c\xb9\xf3\x90\xbe\xa9" + "\xa0\xd7\xe8\x03\x0e\xfe\x99\x9b\x8a\x1c\xbe\xa7\x63\xe8\x2b\x45" + "\xd4\x2c\x65\x25\x4c\x33\xda\xc5\x85\x77\x5d\x62\xea\x93\xe4\x45" + "\x59\xff\xa1\xd2\xf1\x73\x11\xed\x02\x64\x8a\x1a\xfb\xe1\x88\xa6" + "\x50\x6f\xff\x87\x12\xbb\xfc\x10\xcf\x19\x41\xb0\x35\x44\x7d\x51" + "\xe9\xc0\x77\xf2\x73\x21\x2e\x62\xbf\x65\xa5\xd1\x3b\xb1\x3e\x19" + "\x75\x4b\xb7\x8e\x03\xc3\xdf\xc8\xb2\xe6\xec\x2d\x7d\xa5\x6a\xba" + "\x93\x47\x50\xeb\x6e\xdb\x88\x05\x45\xad\x03\x8c\xf7\x9a\xe1\xc9" + "\x1e\x16\x96\x37\xa5\x3e\xe9\xb9\xa8\xdc\xb9\xa9\xf6\xa1\x3d\xed" + "\xbe\x12\x29\x8a\x3d\x3d\x90\xfc\x94\xfe\x66\x28\x1c\x1b\xa4\x89" + "\x47\x66\x4f\xac\x14\x00\x22\x2d\x5c\x03\xea\x71\x4d\x19\x7d\xd6" + "\x58\x39\x4c\x3d\x06\x2b\x30\xa6\xdc\x2c\x8d\xd1\xde\x79\x77\xfa" + "\x9c\x6b\x72\x11\x8a\x7f\x7d\x37\x28\x2a\x88\xbf\x0a\xdb\xac\x3b" + "\xc5\xa5\xd5\x7e\x25\xec\xa6\x7f\x5b\x53\x75\x83\x49\xd4\x77\xcc" + "\x7d\x7e\xd3\x3d\x30\x2c\x98\x3f\x18\x9a\x11\x8a\x37\xda\x99\x0f" + "\x3b\x06\xe1\x87\xd5\xe9\x4e\xe0\x9c\x0e\x39\x34\xe2\xdd\xf6\x58" + "\x60\x63\xa6\xea\xe8\xc0\xb4\xde\xdf\xa0\xbc\x21\xc3\x2d\xf4\xa4" + "\xc8\x6f\x62\x6c\x0f\x71\x88\xf9\xda\x2d\x30\xd5\x95\xe1\xfc\x6d" + "\x88\xc5\xc3\x95\x51\x83\xde\x41\x46\x6f\x7e\x1b\x10\x48\xad\x2b" + "\x82\x88\xa2\x6f\x57\x4d\x4a\xbd\x90\xc8\x06\x8f\x52\x5d\x6e\xee" + "\x09\xe6\xa3\xcb\x30\x9c\x14\xf6\xac\x66\x9b\x81\x0a\x75\x42\x6b" + "\xab\x27\xec\x76\xfb\x8d\xc5\xbf\x0e\x93\x81\x7b\x81\xd4\x85\xa6" + "\x90\x5a\xa6\xa2\x8b\xa9\xb7\x34\xe6\x15\x36\x93\x8b\xe2\x99\xc7" + "\xad\x66\x7e\xd6\x89\xa9\xc8\x15\xcb\xc5\xeb\x06\x85\xd4\x2f\x6e" + "\x9b\x95\x7a\x06\x6c\xfa\x31\x1d\xc4\xe5\x7d\xfb\x10\x35\x88\xc2" + "\xbe\x1c\x16\x5d\xc2\xf4\x0d\xf3\xc9\x94\xb2\x7e\xa7\xbd\x9c\x03" + "\x32\xaf\x8b\x1a\xc8\xcc\x82\xd8\x87\x96\x6e\x3d\xcc\x93\xd2\x43" + "\x73\xf9\xde\xec\x49\x49\xf4\x56\x2a\xc8\x6e\x32\x70\x48\xf8\x70" + "\xa3\x96\x31\xf4\xf2\x08\xc5\x12\xd2\xeb\xb6\xea\xa3\x07\x05\x61" + "\x74\xa3\x04\x2f\x17\x82\x40\x5e\x4c\xd1\x51\xb8\x10\x5b\xc8\x9f" + "\x87\x73\x80\x0d\x6f\xc6\xb9\xf6\x7c\x31\x0a\xcc\xd9\x03\x0f\x7a" + "\x47\x69\xb1\x55\xab\xe9\xb5\x75\x62\x9e\x95\xbe\x7b\xa9\x53\x6e" + "\x28\x73\xdc\xb3\xa4\x8a\x1c\x91\xf5\x8a\xf9\x32\x2b\xbd\xa5\xdc" + "\x07\xb5\xaf\x49\xdb\x9c\x35\xc9\x69\xde\xac\xb1\xd0\x86\xcb\x31" + "\x0b\xc4\x4f\x63\x4e\x70\xa7\x80\xe3\xbc\x0b\x73\x0e\xf2\x8c\x87" + "\x88\x7b\xa9\x6d\xde\x8a\x73\x14\xb9\x80\x55\x03\x2b\x29\x64\x6a" + "\xda\x48\x0e\x78\x07\x40\x48\x46\x58\xa9\x4e\x68\x1d\xd1\xc1\xc8" + "\x3b\x35\x53\x61\xd5\xe3\x0d\x4c\x42\x74\x10\x67\x85\x9f\x66\x2a" + "\xf7\x2b\x7b\x77\x8b\x6e\xda\x2c\xc1\x5a\x20\x34\x3f\xf5\x8b\x6f" + 
"\xe4\x61\xf5\x58\xab\x72\x1a\xf1\x8d\x28\xcc\xa5\x30\x68\xb5\x50" + "\x7b\x81\x43\x89\x8e\xa9\xac\x63\x3a\x4a\x78\x7b\xd2\x45\xe6\xe0" + "\xdc\x5d\xf2\x1a\x2b\x54\x50\xa5\x9d\xf6\xe7\x9f\x25\xaf\x56\x6a" + "\x84\x2a\x75\xa3\x9a\xc7\xfa\x94\xec\x83\xab\xa5\xaa\xe1\xf9\x89" + "\x29\xa9\xf6\x53\x24\x24\xae\x4a\xe8\xbc\xe8\x9e\x5c\xd7\x54\x7c" + "\x65\x20\x97\x28\x94\x76\xf9\x9e\x81\xcf\x98\x6a\x3a\x7b\xec\xf3" + "\x09\x60\x2e\x43\x18\xb5\xf6\x8c\x44\x0f\xf2\x0a\x17\x5b\xac\x98" + "\x30\xab\x6e\xd5\xb3\xef\x25\x68\x50\xb6\xe1\xc0\xe4\x5a\x63\x43" + "\xea\xca\xda\x23\xc1\xc2\xe9\x30\xec\xb3\x9f\xbf\x1f\x09\x76\xaf" + "\x65\xbc\xb5\xab\x30\xac\x0b\x05\xef\x5c\xa3\x65\x77\x33\x1c\xc5" + "\xdf\xc9\x39\xab\xca\xf4\x3b\x88\x25\x6d\x50\x87\xb1\x79\xc2\x23" + "\x9d\xb5\x21\x01\xaa\xa3\xb7\x61\xa3\x48\x91\x72\x3d\x54\x85\x86" + "\x91\x81\x35\x78\xbf\x8f\x27\x57\xcb\x9b\x34\xab\x63\x40\xf1\xbc" + "\x23\x5a\x26\x6a\xba\x57\xe2\x8f\x2a\xdc\x82\xe0\x3b\x7f\xec\xd3" + "\xd8\x9d\xd3\x13\x54\x70\x64\xc3\xfd\xbf\xa3\x46\xa7\x53\x42\x7f" + "\xc1\xbd\x7b\xb3\x13\x47\x2a\x45\x1e\x76\x2c\x0d\x6d\x46\x26\x24" + "\xa8\xc7\x00\x2b\x10\x7f\x2a\x6c\xfc\x68\x4e\x6e\x85\x53\x00\xaf" + "\xd5\xfb\x59\x64\xc7\x9b\x24\xd1\x05\xdc\x34\x53\x6d\x27\xa9\x79" + "\xff\xd7\x5e\x7a\x40\x81\x8e\xc3\xf2\x38\xc9\x8d\x87\xb5\x38\xda" + "\x43\x64\x1b\x59\x62\x88\xc1\x6e\x85\x84\x33\xcd\x6d\x7b\x62\x1d" + "\x60\xf9\x98\xf7\xd1\xb1\xd4\xbe\x56\x6e\xa8\x6f\xff\xe7\x8b\x60" + "\x53\x80\xc7\x7c\xe0\x78\x89\xa9\xab\x42\x8f\x8e\x4d\x92\xac\xa7" + "\xfd\x47\x11\xc7\xdb\x7c\x77\xfb\xa4\x1d\x70\xaf\x56\x14\x52\xb0", + .expected_a_public = + "\xa1\x6c\x9e\xda\x45\x4d\xf6\x59\x04\x00\xc1\xc6\x8b\x12\x3b\xcd" + "\x07\xe4\x3e\xec\xac\x9b\xfc\xf7\x6d\x73\x39\x9e\x52\xf8\xbe\x33" + "\xe2\xca\xea\x99\x76\xc7\xc9\x94\x5c\xf3\x1b\xea\x6b\x66\x4b\x51" + "\x90\xf6\x4f\x75\xd5\x85\xf4\x28\xfd\x74\xa5\x57\xb1\x71\x0c\xb6" + "\xb6\x95\x70\x2d\xfa\x4b\x56\xe0\x56\x10\x21\xe5\x60\xa6\x18\xa4" + "\x78\x8c\x07\xc0\x2b\x59\x9c\x84\x5b\xe9\xb9\x74\xbf\xbc\x65\x48" + "\x27\x82\x40\x53\x46\x32\xa2\x92\x91\x9d\xf6\xd1\x07\x0e\x1d\x07" + "\x1b\x41\x04\xb1\xd4\xce\xae\x6e\x46\xf1\x72\x50\x7f\xff\xa8\xa2" + "\xbc\x3a\xc1\xbb\x28\xd7\x7d\xcd\x7a\x22\x01\xaf\x57\xb0\xa9\x02" + "\xd4\x8a\x92\xd5\xe6\x8e\x6f\x11\x39\xfe\x36\x87\x89\x42\x25\x42" + "\xd9\xbe\x67\x15\xe1\x82\x8a\x5e\x98\xc2\xd5\xde\x9e\x13\x1a\xe7" + "\xf9\x9f\x8e\x2d\x49\xdc\x4d\x98\x8c\xdd\xfd\x24\x7c\x46\xa9\x69" + "\x3b\x31\xb3\x12\xce\x54\xf6\x65\x75\x40\xc2\xf1\x04\x92\xe3\x83" + "\xeb\x02\x3d\x79\xc0\xf9\x7c\x28\xb3\x97\x03\xf7\x61\x1c\xce\x95" + "\x1a\xa0\xb3\x77\x1b\xc1\x9f\xf8\xf6\x3f\x4d\x0a\xfb\xfa\x64\x1c" + "\xcb\x37\x5b\xc3\x28\x60\x9f\xd1\xf2\xc4\xee\x77\xaa\x1f\xe9\xa2" + "\x89\x4c\xc6\xb7\xb3\xe4\xa5\xed\xa7\xe8\xac\x90\xdc\xc3\xfb\x56" + "\x9c\xda\x2c\x1d\x1a\x9a\x8c\x82\x92\xee\xdc\xa0\xa4\x01\x6e\x7f" + "\xc7\x0e\xc2\x73\x7d\xa6\xac\x12\x01\xc0\xc0\xc8\x7c\x84\x86\xc7" + "\xa5\x94\xe5\x33\x84\x71\x6e\x36\xe3\x3b\x81\x30\xe0\xc8\x51\x52" + "\x2b\x9e\x68\xa2\x6e\x09\x95\x8c\x7f\x78\x82\xbd\x53\x26\xe7\x95" + "\xe0\x03\xda\xc0\xc3\x6e\xcf\xdc\xb3\x14\xfc\xe9\x5b\x9b\x70\x6c" + "\x93\x04\xab\x13\xf7\x17\x6d\xee\xad\x32\x48\xe9\xa0\x94\x1b\x14" + "\x64\x4f\xa1\xb3\x8d\x6a\xca\x28\xfe\x4a\xf4\xf0\xc5\xb7\xf9\x8a" + "\x8e\xff\xfe\x57\x6f\x20\xdb\x04\xab\x02\x31\x22\x42\xfd\xbd\x77" + "\xea\xce\xe8\xc7\x5d\xe0\x8e\xd6\x66\xd0\xe4\x04\x2f\x5f\x71\xc7" + "\x61\x2d\xa5\x3f\x2f\x46\xf2\xd8\x5b\x25\x82\xf0\x52\x88\xc0\x59" + "\xd3\xa3\x90\x17\xc2\x04\x13\xc3\x13\x69\x4f\x17\xb1\xb3\x46\x4f" + 
"\xa7\xe6\x8b\x5e\x3e\x95\x0e\xf5\x42\x17\x7f\x4d\x1f\x1b\x7d\x65" + "\x86\xc5\xc8\xae\xae\xd8\x4f\xe7\x89\x41\x69\xfd\x06\xce\x5d\xed" + "\x44\x55\xad\x51\x98\x15\x78\x8d\x68\xfc\x93\x72\x9d\x22\xe5\x1d" + "\x21\xc3\xbe\x3a\x44\x34\xc0\xa3\x1f\xca\xdf\x45\xd0\x5c\xcd\xb7" + "\x72\xeb\xae\x7a\xad\x3f\x05\xa0\xe3\x6e\x5a\xd8\x52\xa7\xf1\x1e" + "\xb4\xf2\xcf\xe7\xdf\xa7\xf2\x22\x00\xb2\xc4\x17\x3d\x2c\x15\x04" + "\x71\x28\x69\x5c\x69\x21\xc8\xf1\x9b\xd8\xc7\xbc\x27\xa3\x85\xe9" + "\x53\x77\xd3\x65\xc3\x86\xdd\xb3\x76\x13\xfb\xa1\xd4\xee\x9d\xe4" + "\x51\x3f\x83\x59\xe4\x47\xa8\xa6\x0d\x68\xd5\xf6\xf4\xca\x31\xcd" + "\x30\x48\x34\x90\x11\x8e\x87\xe9\xea\xc9\xd0\xc3\xba\x28\xf9\xc0" + "\xc9\x8e\x23\xe5\xc2\xee\xf2\x47\x9c\x41\x1c\x10\x33\x27\x23\x49" + "\xe5\x0d\x18\xbe\x19\xc1\xba\x6c\xdc\xb7\xa1\xe7\xc5\x0d\x6f\xf0" + "\x8c\x62\x6e\x0d\x14\xef\xef\xf2\x8e\x01\xd2\x76\xf5\xc1\xe1\x92" + "\x3c\xb3\x76\xcd\xd8\xdd\x9b\xe0\x8e\xdc\x24\x34\x13\x65\x0f\x11" + "\xaf\x99\x7a\x2f\xe6\x1f\x7d\x17\x3e\x8a\x68\x9a\x37\xc8\x8d\x3e" + "\xa3\xfe\xfe\x57\x22\xe6\x0e\x50\xb5\x98\x0b\x71\xd8\x01\xa2\x8d" + "\x51\x96\x50\xc2\x41\x31\xd8\x23\x98\xfc\xd1\x9d\x7e\x27\xbb\x69" + "\x78\xe0\x87\xf7\xe4\xdd\x58\x13\x9d\xec\x00\xe4\xb9\x70\xa2\x94" + "\x5d\x52\x4e\xf2\x5c\xd1\xbc\xfd\xee\x9b\xb9\xe5\xc4\xc0\xa8\x77" + "\x67\xa4\xd1\x95\x34\xe4\x6d\x5f\x25\x02\x8d\x65\xdd\x11\x63\x55" + "\x04\x01\x21\x60\xc1\x5c\xef\x77\x33\x01\x1c\xa2\x11\x2b\xdd\x2b" + "\x74\x99\x23\x38\x05\x1b\x7e\x2e\x01\x52\xfe\x9c\x23\xde\x3e\x1a" + "\x72\xf4\xff\x7b\x02\xaa\x08\xcf\xe0\x5b\x83\xbe\x85\x5a\xe8\x9d" + "\x11\x3e\xff\x2f\xc6\x97\x67\x36\x6c\x0f\x81\x9c\x26\x29\xb1\x0f" + "\xbb\x53\xbd\xf4\xec\x2a\x84\x41\x28\x3b\x86\x40\x95\x69\x55\x5f" + "\x30\xee\xda\x1e\x6c\x4b\x25\xd6\x2f\x2c\x0e\x3c\x1a\x26\xa0\x3e" + "\xef\x09\xc6\x2b\xe5\xa1\x0c\x03\xa8\xf5\x39\x70\x31\xc4\x32\x79" + "\xd1\xd9\xc2\xcc\x32\x4a\xf1\x2f\x57\x5a\xcc\xe5\xc3\xc5\xd5\x4e" + "\x86\x56\xca\x64\xdb\xab\x61\x85\x8f\xf9\x20\x02\x40\x66\x76\x9e" + "\x5e\xd4\xac\xf0\x47\xa6\x50\x5f\xc2\xaf\x55\x9b\xa3\xc9\x8b\xf8" + "\x42\xd5\xcf\x1a\x95\x22\xd9\xd1\x0b\x92\x51\xca\xde\x46\x02\x0d" + "\x8b\xee\xd9\xa0\x04\x74\xf5\x0e\xb0\x3a\x62\xec\x3c\x91\x29\x33" + "\xa7\x78\x22\x92\xac\x27\xe6\x2d\x6f\x56\x8a\x5d\x72\xc2\xf1\x5c" + "\x54\x11\x97\x24\x61\xcb\x0c\x52\xd4\x57\x56\x22\x86\xf0\x19\x27" + "\x76\x30\x04\xf4\x39\x7b\x1a\x5a\x04\x0d\xec\x59\x9a\x31\x4c\x40" + "\x19\x6d\x3c\x41\x1b\x0c\xca\xeb\x25\x39\x6c\x96\xf8\x55\xd0\xec", + .expected_ss = + "\xf9\x55\x4f\x48\x38\x74\xb7\x46\xa3\xc4\x2e\x88\xf0\x34\xab\x1d" + "\xcd\xa5\x58\xa7\x95\x88\x36\x62\x6f\x8a\xbd\xf2\xfb\x6f\x3e\xb9" + "\x91\x65\x58\xef\x70\x2f\xd5\xc2\x97\x70\xcb\xce\x8b\x78\x1c\xe0" + "\xb9\xfa\x77\x34\xd2\x4a\x19\x58\x11\xfd\x93\x84\x40\xc0\x8c\x19" + "\x8b\x98\x50\x83\xba\xfb\xe2\xad\x8b\x81\x84\x63\x90\x41\x4b\xf8" + "\xe8\x78\x86\x04\x09\x8d\x84\xd1\x43\xfd\xa3\x58\x21\x2a\x3b\xb1" + "\xa2\x5b\x48\x74\x3c\xa9\x16\x34\x28\xf0\x8e\xde\xe2\xcf\x8e\x68" + "\x53\xab\x65\x06\xb7\x86\xb1\x08\x4f\x73\x97\x00\x10\x95\xd1\x84" + "\x72\xcf\x14\xdb\xff\xa7\x80\xd8\xe5\xf2\x2c\x89\x37\xb0\x81\x2c" + "\xf5\xd6\x7d\x1b\xb0\xe2\x8e\x87\x32\x3d\x37\x6a\x79\xaa\xe7\x08" + "\xc9\x67\x55\x5f\x1c\xae\xa6\xf5\xef\x79\x3a\xaf\x3f\x82\x14\xe2" + "\xf3\x69\x91\xed\xb7\x9e\xc9\xde\xd0\x29\x70\xd9\xeb\x0f\xf5\xc7" + "\xf6\x7c\xa7\x7f\xec\xed\xe1\xbd\x13\xe1\x43\xe4\x42\x30\xe3\x5f" + "\xe0\xf3\x15\x55\x2f\x7a\x42\x17\x67\xcb\xc2\x4f\xd0\x85\xfc\x6c" + "\xec\xe8\xfc\x25\x78\x4b\xe4\x0f\xd4\x3d\x78\x28\xd3\x53\x79\xcb" + 
"\x2c\x82\x67\x9a\xdc\x32\x55\xd2\xda\xae\xd8\x61\xce\xd6\x59\x0b" + "\xc5\x44\xeb\x08\x81\x8c\x65\xb2\xb7\xa6\xff\xf7\xbf\x99\xc6\x8a" + "\xbe\xde\xc2\x17\x56\x05\x6e\xd2\xf1\x1e\xa2\x04\xeb\x02\x74\xaa" + "\x04\xfc\xf0\x6b\xd4\xfc\xf0\x7a\x5f\xfe\xe2\x74\x7f\xeb\x9b\x6a" + "\x8a\x09\x96\x5d\xe1\x91\xb6\x9e\x37\xd7\x63\xd7\xb3\x5c\xb5\xa3" + "\x5f\x62\x00\xdf\xc5\xbf\x85\xba\xa7\xa9\xb6\x1f\x76\x78\x65\x01" + "\xfe\x1d\x6c\xfe\x15\x9e\xf4\xb1\xbc\x8d\xad\x3c\xec\x69\x27\x57" + "\xa4\x89\x77\x46\xe1\x49\xc7\x22\xde\x79\xe0\xf7\x3a\xa1\x59\x8b" + "\x59\x71\xcc\xd6\x18\x24\xc1\x8a\x2f\xe3\xdf\xdd\x6c\xf7\x62\xaa" + "\x15\xaa\x39\x37\x3b\xaf\x7d\x6e\x88\xeb\x19\xa8\xa0\x26\xd3\xaa" + "\x2d\xcc\x5f\x56\x99\x86\xa9\xed\x4d\x02\x31\x40\x97\x70\x83\xa7" + "\x08\x98\x7e\x49\x46\xd9\x75\xb5\x7a\x6a\x40\x69\xa0\x6d\xb2\x18" + "\xc0\xad\x88\x05\x02\x95\x6f\xf7\x8f\xcb\xa2\xe4\x7b\xab\x4a\x0f" + "\x9a\x1b\xef\xcc\xd1\x6a\x5d\x1e\x6a\x2a\x8b\x5b\x80\xbc\x5f\x38" + "\xdd\xaf\xad\x44\x15\xb4\xaf\x26\x1c\x1a\x4d\xa7\x4b\xec\x88\x33" + "\x24\x42\xb5\x0c\x9c\x56\xd4\xba\xa7\xb9\x65\xd5\x76\xb2\xbc\x16" + "\x8e\xfa\x0c\x7a\xc0\xa2\x2c\x5a\x39\x56\x7d\xe6\xf8\xa9\xf4\x49" + "\xd0\x50\xf2\x5e\x4b\x0a\x43\xe4\x9a\xbb\xea\x35\x28\x99\x84\x83" + "\xec\xc1\xa0\x68\x15\x9a\x2b\x01\x04\x48\x09\x11\x1b\xb6\xa4\xd8" + "\x03\xad\xb6\x4c\x9e\x1d\x90\xae\x88\x0f\x75\x95\x25\xa0\x27\x13" + "\xb7\x4f\xe2\x3e\xd5\x59\x1a\x7c\xde\x95\x14\x28\xd1\xde\x84\xe4" + "\x07\x7c\x5b\x06\xd6\xe6\x9c\x8a\xbe\xd2\xb4\x62\xd1\x67\x8a\x9c" + "\xac\x4f\xfa\x70\xd6\xc8\xc0\xeb\x5e\xf6\x3e\xdc\x48\x8e\xce\x3f" + "\x92\x3e\x60\x77\x63\x60\x6b\x76\x04\xa5\xba\xc9\xab\x92\x4e\x0d" + "\xdc\xca\x82\x44\x5f\x3a\x42\xeb\x01\xe7\xe0\x33\xb3\x32\xaf\x4b" + "\x81\x35\x2d\xb6\x57\x15\xfe\x52\xc7\x54\x2e\x41\x3b\x22\x6b\x12" + "\x72\xdb\x5c\x66\xd0\xb6\xb4\xfe\x90\xc0\x20\x34\x95\xf9\xe4\xc7" + "\x7e\x71\x89\x4f\x6f\xfb\x2a\xf3\xdf\x3f\xe3\xcf\x0e\x1a\xd9\xf2" + "\xc1\x02\x67\x5d\xdc\xf1\x7d\xe8\xcf\x64\x77\x4d\x12\x03\x77\x2c" + "\xfb\xe1\x59\xf7\x2c\x96\x9c\xaf\x46\x9c\xc7\x67\xcf\xee\x94\x50" + "\xc7\xa1\x23\xe6\x9f\x4d\x73\x92\xad\xf9\x4a\xce\xdb\x44\xd5\xe3" + "\x17\x05\x37\xdb\x9c\x6c\xc5\x7e\xb7\xd4\x11\x4a\x8c\x51\x03\xaa" + "\x73\x4b\x16\xd9\x79\xf5\xf1\x67\x20\x9b\x25\xe5\x41\x52\x59\x06" + "\x8b\xf2\x23\x2f\x6e\xea\xf3\x24\x0a\x94\xbb\xb8\x7e\xd9\x23\x4a" + "\x9f\x1f\xe1\x13\xb5\xfe\x85\x2f\x4c\xbe\x6a\x66\x02\x1d\x90\xd2" + "\x01\x25\x8a\xfd\x78\x3a\x28\xb8\x18\xc1\x38\x16\x21\x6b\xb4\xf9" + "\x64\x0f\xf1\x73\xc4\x5c\xd1\x41\xf2\xfe\xe7\x26\xad\x79\x12\x75" + "\x49\x48\xdb\x21\x71\x35\xf7\xb7\x46\x5a\xa1\x81\x25\x47\x31\xea" + "\x1d\x76\xbb\x32\x5a\x90\xb0\x42\x1a\x47\xe8\x0c\x82\x92\x43\x1c" + "\x0b\xdd\xe5\x25\xce\xd3\x06\xcc\x59\x5a\xc9\xa0\x01\xac\x29\x12" + "\x31\x2e\x3d\x1a\xed\x3b\xf3\xa7\xef\x52\xc2\x0d\x18\x1f\x03\x28" + "\xc9\x2b\x38\x61\xa4\x01\xc9\x3c\x11\x08\x14\xd4\xe5\x31\xe9\x3c" + "\x1d\xad\xf8\x76\xc4\x84\x9f\xea\x16\x61\x3d\x6d\xa3\x32\x31\xcd" + "\x1c\xca\xb8\x74\xc2\x45\xf3\x01\x9c\x7a\xaf\xfd\xe7\x1e\x5a\x18" + "\xb1\x9d\xbb\x7a\x2d\x34\x40\x17\x49\xad\x1f\xeb\x2d\xa2\x26\xb8" + "\x16\x28\x4b\x72\xdd\xd0\x8d\x85\x4c\xdd\xf8\x57\x48\xd5\x1d\xfb" + "\xbd\xec\x11\x5d\x1e\x9c\x26\x81\xbf\xf1\x16\x12\x32\xc3\xf3\x07" + "\x0e\x6e\x7f\x17\xec\xfb\xf4\x5d\xe2\xb1\xca\x97\xca\x46\x20\x2d" + "\x09\x85\x19\x25\x89\xa8\x9b\x51\x74\xae\xc9\x1b\x4c\xb6\x80\x62", + .secret_size = 1040, + .b_public_size = 1024, + .expected_a_public_size = 1024, + .expected_ss_size = 1024, + }, + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* 
type */ + "\x10\x00" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#else + "\x00\x01" /* type */ + "\x00\x10" /* len */ + "\x00\x00\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00", /* g_size */ +#endif + .b_secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x10\x04" /* len */ + "\x00\x04\x00\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x04\x10" /* len */ + "\x00\x00\x04\x00" /* key_size */ + "\x00\x00\x00\x00" /* p_size */ + "\x00\x00\x00\x00" /* g_size */ +#endif + /* xa */ + "\x76\x6e\xeb\xf9\xeb\x76\xae\x37\xcb\x19\x49\x8b\xeb\xaf\xb0\x4b" + "\x6d\xe9\x15\xad\xda\xf2\xef\x58\xe9\xd6\xdd\x4c\xb3\x56\xd0\x3b" + "\x00\xb0\x65\xed\xae\xe0\x2e\xdf\x8f\x45\x3f\x3c\x5d\x2f\xfa\x96" + "\x36\x33\xb2\x01\x8b\x0f\xe8\x46\x15\x6d\x60\x5b\xec\x32\xc3\x3b" + "\x06\xf3\xb4\x1b\x9a\xef\x3c\x03\x0e\xcc\xce\x1d\x24\xa0\xc9\x08" + "\x65\xf9\x45\xe5\xd2\x43\x08\x88\x58\xd6\x46\xe7\xbb\x25\xac\xed" + "\x3b\xac\x6f\x5e\xfb\xd6\x19\xa6\x20\x3a\x1d\x0c\xe8\x00\x72\x54" + "\xd7\xd9\xc9\x26\x49\x18\xc6\xb8\xbc\xdd\xf3\xce\xf3\x7b\x69\x04" + "\x5c\x6f\x11\xdb\x44\x42\x72\xb6\xb7\x84\x17\x86\x47\x3f\xc5\xa1" + "\xd8\x86\xef\xe2\x27\x49\x2b\x8f\x3e\x91\x12\xd9\x45\x96\xf7\xe6" + "\x77\x76\x36\x58\x71\x9a\xb1\xdb\xcf\x24\x9e\x7e\xad\xce\x45\xba" + "\xb5\xec\x8e\xb9\xd6\x7b\x3d\x76\xa4\x85\xad\xd8\x49\x9b\x80\x9d" + "\x7f\x9f\x85\x09\x9e\x86\x5b\x6b\xf3\x8d\x39\x5e\x6f\xe4\x30\xc8" + "\xa5\xf3\xdf\x68\x73\x6b\x2e\x9a\xcb\xac\x0a\x0d\x44\xc1\xaf\xb2" + "\x11\x1b\x7c\x43\x08\x44\x43\xe2\x4e\xfd\x93\x30\x99\x09\x12\xbb" + "\xf6\x31\x34\xa5\x3d\x45\x98\xee\xd7\x2a\x1a\x89\xf5\x37\x92\x33" + "\xa0\xdd\xf5\xfb\x1f\x90\x42\x55\x5a\x0b\x82\xff\xf0\x96\x92\x15" + "\x65\x5a\x55\x96\xca\x1b\xd5\xe5\xb5\x94\xde\x2e\xa6\x03\x57\x9e" + "\x15\xe4\x32\x2b\x1f\xb2\x22\x21\xe9\xa0\x05\xd3\x65\x6c\x11\x66" + "\x25\x38\xbb\xa3\x6c\xc2\x0b\x2b\xd0\x7a\x20\x26\x29\x37\x5d\x5f" + "\xd8\xff\x2a\xcd\x46\x6c\xd6\x6e\xe5\x77\x1a\xe6\x33\xf1\x8e\xc8" + "\x10\x30\x11\x00\x27\xf9\x7d\x0e\x28\x43\xa7\x67\x38\x7f\x16\xda" + "\xd0\x01\x8e\xa4\xe8\x6f\xcd\x23\xaf\x77\x52\x34\xad\x7e\xc3\xed" + "\x2d\x10\x0a\x33\xdc\xcf\x1b\x88\x0f\xcc\x48\x7f\x42\xf0\x9e\x13" + "\x1f\xf5\xd1\xe9\x90\x87\xbd\xfa\x5f\x1d\x77\x55\xcb\xc3\x05\xaf" + "\x71\xd0\xe0\xab\x46\x31\xd7\xea\x89\x54\x2d\x39\xaf\xf6\x4f\x74" + "\xaf\x46\x58\x89\x78\x95\x2e\xe6\x90\xb7\xaa\x00\x73\x9f\xed\xb9" + "\x00\xd6\xf6\x6d\x26\x59\xcd\x56\xdb\xf7\x3d\x5f\xeb\x6e\x46\x33" + "\xb1\x23\xed\x9f\x8d\x58\xdc\xb4\x28\x3b\x90\x09\xc4\x61\x02\x1f" + "\xf8\x62\xf2\x6e\xc1\x94\x71\x66\x93\x11\xdf\xaa\x3e\xd7\xb5\xe5" + "\xc1\x78\xe9\x14\xcd\x55\x16\x51\xdf\x8d\xd0\x94\x8c\x43\xe9\xb8" + "\x1d\x42\x7f\x76\xbc\x6f\x87\x42\x88\xde\xd7\x52\x78\x00\x4f\x18" + "\x02\xe7\x7b\xe2\x8a\xc3\xd1\x43\xa5\xac\xda\xb0\x8d\x19\x96\xd4" + "\x81\xe0\x75\xe9\xca\x41\x7e\x1f\x93\x0b\x26\x24\xb3\xaa\xdd\x10" + "\x20\xd3\xf2\x9f\x3f\xdf\x65\xde\x67\x79\xdc\x76\x9f\x3c\x72\x75" + "\x65\x8a\x30\xcc\xd2\xcc\x06\xb1\xab\x62\x86\x78\x5d\xb8\xce\x72" + "\xb3\x12\xc7\x9f\x07\xd0\x6b\x98\x82\x9b\x6c\xbb\x15\xe5\xcc\xf4" + "\xc8\xf4\x60\x81\xdc\xd3\x09\x1b\x5e\xd4\xf3\x55\xcf\x1c\x16\x83" + "\x61\xb4\x2e\xcc\x08\x67\x58\xfd\x46\x64\xbc\x29\x4b\xdd\xda\xec" + "\xdc\xc6\xa9\xa5\x73\xfb\xf8\xf3\xaf\x89\xa8\x9e\x25\x14\xfa\xac" + "\xeb\x1c\x7c\x80\x96\x66\x4d\x41\x67\x9b\x07\x4f\x0a\x97\x17\x1c" + 
"\x4d\x61\xc7\x2e\x6f\x36\x98\x29\x50\x39\x6d\xe7\x70\xda\xf0\xc8" + "\x05\x80\x7b\x32\xff\xfd\x12\xde\x61\x0d\xf9\x4c\x21\xf1\x56\x72" + "\x3d\x61\x46\xc0\x2d\x07\xd1\x6c\xd3\xbe\x9a\x21\x83\x85\xf7\xed" + "\x53\x95\x44\x40\x8f\x75\x12\x18\xc2\x9a\xfd\x5e\xce\x66\xa6\x7f" + "\x57\xc0\xd7\x73\x76\xb3\x13\xda\x2e\x58\xc6\x27\x40\xb2\x2d\xef" + "\x7d\x72\xb4\xa8\x75\x6f\xcc\x5f\x42\x3e\x2c\x90\x36\x59\xa0\x34" + "\xaa\xce\xbc\x04\x4c\xe6\x56\xc2\xcd\xa6\x1c\x59\x04\x56\x53\xcf" + "\x6d\xd7\xf0\xb1\x4f\x91\xfa\x84\xcf\x4b\x8d\x50\x4c\xf8\x2a\x31" + "\x5f\xe3\xba\x79\xb4\xcc\x59\x64\xe3\x7a\xfa\xf6\x06\x9d\x04\xbb" + "\xce\x61\xbf\x9e\x59\x0a\x09\x51\x6a\xbb\x0b\x80\xe0\x91\xc1\x51" + "\x04\x58\x67\x67\x4b\x42\x4f\x95\x68\x75\xe2\x1f\x9c\x14\x70\xfd" + "\x3a\x8a\xce\x8b\x04\xa1\x89\xe7\xb4\xbf\x70\xfe\xf3\x0c\x48\x04" + "\x3a\xd2\x85\x68\x03\xe7\xfa\xec\x5b\x55\xb7\x95\xfd\x5b\x19\x35" + "\xad\xcb\x4a\x63\x03\x44\x64\x2a\x48\x59\x9a\x26\x43\x96\x8c\xe6" + "\xbd\xb7\x90\xd4\x5f\x8d\x08\x28\xa8\xc5\x89\x70\xb9\x6e\xd3\x3b" + "\x76\x0e\x37\x98\x15\x27\xca\xc9\xb0\xe0\xfd\xf3\xc6\xdf\x69\xce" + "\xe1\x5f\x6a\x3e\x5c\x86\xe2\x58\x41\x11\xf0\x7e\x56\xec\xe4\xc9" + "\x0d\x87\x91\xfb\xb9\xc8\x0d\x34\xab\xb0\xc6\xf2\xa6\x00\x7b\x18" + "\x92\xf4\x43\x7f\x01\x85\x2e\xef\x8c\x72\x50\x10\xdb\xf1\x37\x62" + "\x16\x85\x71\x01\xa8\x2b\xf0\x13\xd3\x7c\x0b\xaf\xf1\xf3\xd1\xee" + "\x90\x41\x5f\x7d\x5b\xa9\x83\x4b\xfa\x80\x59\x50\x73\xe1\xc4\xf9" + "\x5e\x4b\xde\xd9\xf5\x22\x68\x5e\x65\xd9\x37\xe4\x1a\x08\x0e\xb1" + "\x28\x2f\x40\x9e\x37\xa8\x12\x56\xb7\xb8\x64\x94\x68\x94\xff\x9f", + .b_public = + "\xa1\x6c\x9e\xda\x45\x4d\xf6\x59\x04\x00\xc1\xc6\x8b\x12\x3b\xcd" + "\x07\xe4\x3e\xec\xac\x9b\xfc\xf7\x6d\x73\x39\x9e\x52\xf8\xbe\x33" + "\xe2\xca\xea\x99\x76\xc7\xc9\x94\x5c\xf3\x1b\xea\x6b\x66\x4b\x51" + "\x90\xf6\x4f\x75\xd5\x85\xf4\x28\xfd\x74\xa5\x57\xb1\x71\x0c\xb6" + "\xb6\x95\x70\x2d\xfa\x4b\x56\xe0\x56\x10\x21\xe5\x60\xa6\x18\xa4" + "\x78\x8c\x07\xc0\x2b\x59\x9c\x84\x5b\xe9\xb9\x74\xbf\xbc\x65\x48" + "\x27\x82\x40\x53\x46\x32\xa2\x92\x91\x9d\xf6\xd1\x07\x0e\x1d\x07" + "\x1b\x41\x04\xb1\xd4\xce\xae\x6e\x46\xf1\x72\x50\x7f\xff\xa8\xa2" + "\xbc\x3a\xc1\xbb\x28\xd7\x7d\xcd\x7a\x22\x01\xaf\x57\xb0\xa9\x02" + "\xd4\x8a\x92\xd5\xe6\x8e\x6f\x11\x39\xfe\x36\x87\x89\x42\x25\x42" + "\xd9\xbe\x67\x15\xe1\x82\x8a\x5e\x98\xc2\xd5\xde\x9e\x13\x1a\xe7" + "\xf9\x9f\x8e\x2d\x49\xdc\x4d\x98\x8c\xdd\xfd\x24\x7c\x46\xa9\x69" + "\x3b\x31\xb3\x12\xce\x54\xf6\x65\x75\x40\xc2\xf1\x04\x92\xe3\x83" + "\xeb\x02\x3d\x79\xc0\xf9\x7c\x28\xb3\x97\x03\xf7\x61\x1c\xce\x95" + "\x1a\xa0\xb3\x77\x1b\xc1\x9f\xf8\xf6\x3f\x4d\x0a\xfb\xfa\x64\x1c" + "\xcb\x37\x5b\xc3\x28\x60\x9f\xd1\xf2\xc4\xee\x77\xaa\x1f\xe9\xa2" + "\x89\x4c\xc6\xb7\xb3\xe4\xa5\xed\xa7\xe8\xac\x90\xdc\xc3\xfb\x56" + "\x9c\xda\x2c\x1d\x1a\x9a\x8c\x82\x92\xee\xdc\xa0\xa4\x01\x6e\x7f" + "\xc7\x0e\xc2\x73\x7d\xa6\xac\x12\x01\xc0\xc0\xc8\x7c\x84\x86\xc7" + "\xa5\x94\xe5\x33\x84\x71\x6e\x36\xe3\x3b\x81\x30\xe0\xc8\x51\x52" + "\x2b\x9e\x68\xa2\x6e\x09\x95\x8c\x7f\x78\x82\xbd\x53\x26\xe7\x95" + "\xe0\x03\xda\xc0\xc3\x6e\xcf\xdc\xb3\x14\xfc\xe9\x5b\x9b\x70\x6c" + "\x93\x04\xab\x13\xf7\x17\x6d\xee\xad\x32\x48\xe9\xa0\x94\x1b\x14" + "\x64\x4f\xa1\xb3\x8d\x6a\xca\x28\xfe\x4a\xf4\xf0\xc5\xb7\xf9\x8a" + "\x8e\xff\xfe\x57\x6f\x20\xdb\x04\xab\x02\x31\x22\x42\xfd\xbd\x77" + "\xea\xce\xe8\xc7\x5d\xe0\x8e\xd6\x66\xd0\xe4\x04\x2f\x5f\x71\xc7" + "\x61\x2d\xa5\x3f\x2f\x46\xf2\xd8\x5b\x25\x82\xf0\x52\x88\xc0\x59" + "\xd3\xa3\x90\x17\xc2\x04\x13\xc3\x13\x69\x4f\x17\xb1\xb3\x46\x4f" + 
"\xa7\xe6\x8b\x5e\x3e\x95\x0e\xf5\x42\x17\x7f\x4d\x1f\x1b\x7d\x65" + "\x86\xc5\xc8\xae\xae\xd8\x4f\xe7\x89\x41\x69\xfd\x06\xce\x5d\xed" + "\x44\x55\xad\x51\x98\x15\x78\x8d\x68\xfc\x93\x72\x9d\x22\xe5\x1d" + "\x21\xc3\xbe\x3a\x44\x34\xc0\xa3\x1f\xca\xdf\x45\xd0\x5c\xcd\xb7" + "\x72\xeb\xae\x7a\xad\x3f\x05\xa0\xe3\x6e\x5a\xd8\x52\xa7\xf1\x1e" + "\xb4\xf2\xcf\xe7\xdf\xa7\xf2\x22\x00\xb2\xc4\x17\x3d\x2c\x15\x04" + "\x71\x28\x69\x5c\x69\x21\xc8\xf1\x9b\xd8\xc7\xbc\x27\xa3\x85\xe9" + "\x53\x77\xd3\x65\xc3\x86\xdd\xb3\x76\x13\xfb\xa1\xd4\xee\x9d\xe4" + "\x51\x3f\x83\x59\xe4\x47\xa8\xa6\x0d\x68\xd5\xf6\xf4\xca\x31\xcd" + "\x30\x48\x34\x90\x11\x8e\x87\xe9\xea\xc9\xd0\xc3\xba\x28\xf9\xc0" + "\xc9\x8e\x23\xe5\xc2\xee\xf2\x47\x9c\x41\x1c\x10\x33\x27\x23\x49" + "\xe5\x0d\x18\xbe\x19\xc1\xba\x6c\xdc\xb7\xa1\xe7\xc5\x0d\x6f\xf0" + "\x8c\x62\x6e\x0d\x14\xef\xef\xf2\x8e\x01\xd2\x76\xf5\xc1\xe1\x92" + "\x3c\xb3\x76\xcd\xd8\xdd\x9b\xe0\x8e\xdc\x24\x34\x13\x65\x0f\x11" + "\xaf\x99\x7a\x2f\xe6\x1f\x7d\x17\x3e\x8a\x68\x9a\x37\xc8\x8d\x3e" + "\xa3\xfe\xfe\x57\x22\xe6\x0e\x50\xb5\x98\x0b\x71\xd8\x01\xa2\x8d" + "\x51\x96\x50\xc2\x41\x31\xd8\x23\x98\xfc\xd1\x9d\x7e\x27\xbb\x69" + "\x78\xe0\x87\xf7\xe4\xdd\x58\x13\x9d\xec\x00\xe4\xb9\x70\xa2\x94" + "\x5d\x52\x4e\xf2\x5c\xd1\xbc\xfd\xee\x9b\xb9\xe5\xc4\xc0\xa8\x77" + "\x67\xa4\xd1\x95\x34\xe4\x6d\x5f\x25\x02\x8d\x65\xdd\x11\x63\x55" + "\x04\x01\x21\x60\xc1\x5c\xef\x77\x33\x01\x1c\xa2\x11\x2b\xdd\x2b" + "\x74\x99\x23\x38\x05\x1b\x7e\x2e\x01\x52\xfe\x9c\x23\xde\x3e\x1a" + "\x72\xf4\xff\x7b\x02\xaa\x08\xcf\xe0\x5b\x83\xbe\x85\x5a\xe8\x9d" + "\x11\x3e\xff\x2f\xc6\x97\x67\x36\x6c\x0f\x81\x9c\x26\x29\xb1\x0f" + "\xbb\x53\xbd\xf4\xec\x2a\x84\x41\x28\x3b\x86\x40\x95\x69\x55\x5f" + "\x30\xee\xda\x1e\x6c\x4b\x25\xd6\x2f\x2c\x0e\x3c\x1a\x26\xa0\x3e" + "\xef\x09\xc6\x2b\xe5\xa1\x0c\x03\xa8\xf5\x39\x70\x31\xc4\x32\x79" + "\xd1\xd9\xc2\xcc\x32\x4a\xf1\x2f\x57\x5a\xcc\xe5\xc3\xc5\xd5\x4e" + "\x86\x56\xca\x64\xdb\xab\x61\x85\x8f\xf9\x20\x02\x40\x66\x76\x9e" + "\x5e\xd4\xac\xf0\x47\xa6\x50\x5f\xc2\xaf\x55\x9b\xa3\xc9\x8b\xf8" + "\x42\xd5\xcf\x1a\x95\x22\xd9\xd1\x0b\x92\x51\xca\xde\x46\x02\x0d" + "\x8b\xee\xd9\xa0\x04\x74\xf5\x0e\xb0\x3a\x62\xec\x3c\x91\x29\x33" + "\xa7\x78\x22\x92\xac\x27\xe6\x2d\x6f\x56\x8a\x5d\x72\xc2\xf1\x5c" + "\x54\x11\x97\x24\x61\xcb\x0c\x52\xd4\x57\x56\x22\x86\xf0\x19\x27" + "\x76\x30\x04\xf4\x39\x7b\x1a\x5a\x04\x0d\xec\x59\x9a\x31\x4c\x40" + "\x19\x6d\x3c\x41\x1b\x0c\xca\xeb\x25\x39\x6c\x96\xf8\x55\xd0\xec", + .secret_size = 16, + .b_secret_size = 1040, + .b_public_size = 1024, + .expected_a_public_size = 1024, + .expected_ss_size = 1024, + .genkey = true, + }, }; static const struct kpp_testvec ecdh_p192_tv_template[] = { @@ -3679,294 +4780,6 @@ static const struct hash_testvec rmd160_tv_template[] = { } }; -static const struct hash_testvec crct10dif_tv_template[] = { - { - .plaintext = "abc", - .psize = 3, - .digest = (u8 *)(u16 []){ 0x443b }, - }, { - .plaintext = "1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 79, - .digest = (u8 *)(u16 []){ 0x4b70 }, - }, { - .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" - "ddddddddddddd", - .psize = 56, - .digest = (u8 *)(u16 []){ 0x9ce3 }, - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - 
"1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 319, - .digest = (u8 *)(u16 []){ 0x44c6 }, - }, { - .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" - "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" - "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a" - "\xa1\x38\xcf\x43\xda\x71\x08\x7c" - "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee" - "\x85\x1c\x90\x27\xbe\x32\xc9\x60" - "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2" - "\x46\xdd\x74\x0b\x7f\x16\xad\x21" - "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93" - "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05" - "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77" - "\x0e\x82\x19\xb0\x24\xbb\x52\xe9" - "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38" - "\xcf\x66\xfd\x71\x08\x9f\x13\xaa" - "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c" - "\xb3\x27\xbe\x55\xec\x60\xf7\x8e" - "\x02\x99\x30\xc7\x3b\xd2\x69\x00" - "\x74\x0b\xa2\x16\xad\x44\xdb\x4f" - "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1" - "\x58\xef\x63\xfa\x91\x05\x9c\x33" - "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5" - "\x19\xb0\x47\xde\x52\xe9\x80\x17" - "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66" - "\xfd\x94\x08\x9f\x36\xcd\x41\xd8" - "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a" - "\xe1\x55\xec\x83\x1a\x8e\x25\xbc" - "\x30\xc7\x5e\xf5\x69\x00\x97\x0b" - "\xa2\x39\xd0\x44\xdb\x72\x09\x7d" - "\x14\xab\x1f\xb6\x4d\xe4\x58\xef" - "\x86\x1d\x91\x28\xbf\x33\xca\x61" - "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3" - "\x47\xde\x75\x0c\x80\x17\xae\x22" - "\xb9\x50\xe7\x5b\xf2\x89\x20\x94" - "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06" - "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78" - "\x0f\x83\x1a\xb1\x25\xbc\x53\xea" - "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39" - "\xd0\x67\xfe\x72\x09\xa0\x14\xab" - "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d" - "\xb4\x28\xbf\x56\xed\x61\xf8\x8f" - "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01" - "\x75\x0c\xa3\x17\xae\x45\xdc\x50" - "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2" - "\x59\xf0\x64\xfb\x92\x06\x9d\x34" - "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6" - "\x1a\xb1\x48\xdf\x53\xea\x81\x18" - "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67" - "\xfe\x95\x09\xa0\x37\xce\x42\xd9" - "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b" - "\xe2\x56\xed\x84\x1b\x8f\x26\xbd" - "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c" - "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e" - "\x15\xac\x20\xb7\x4e\xe5\x59\xf0" - "\x87\x1e\x92\x29\xc0\x34\xcb\x62" - "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4" - "\x48\xdf\x76\x0d\x81\x18\xaf\x23" - "\xba\x51\xe8\x5c\xf3\x8a\x21\x95" - "\x2c\xc3\x37\xce\x65\xfc\x70\x07" - "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79" - "\x10\x84\x1b\xb2\x26\xbd\x54\xeb" - "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a" - "\xd1\x68\xff\x73\x0a\xa1\x15\xac" - "\x43\xda\x4e\xe5\x7c\x13\x87\x1e" - "\xb5\x29\xc0\x57\xee\x62\xf9\x90" - "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02" - "\x76\x0d\xa4\x18\xaf\x46\xdd\x51" - "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3" - "\x5a\xf1\x65\xfc\x93\x07\x9e\x35" - "\xcc\x40\xd7\x6e\x05\x79\x10\xa7" - "\x1b\xb2\x49\xe0\x54\xeb\x82\x19" - "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68" - "\xff\x96\x0a\xa1\x38\xcf\x43\xda" - "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c" - "\xe3\x57\xee\x85\x1c\x90\x27\xbe" - "\x32\xc9\x60\xf7\x6b\x02\x99\x0d" - "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f" - "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1" - "\x88\x1f\x93\x2a\xc1\x35\xcc\x63" - "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5" - "\x49\xe0\x77\x0e\x82\x19\xb0\x24" - "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96" - "\x2d\xc4\x38\xcf\x66\xfd\x71\x08" - "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a" - "\x11\x85\x1c\xb3\x27\xbe\x55\xec" - "\x60\xf7\x8e\x02\x99\x30\xc7\x3b" - "\xd2\x69\x00\x74\x0b\xa2\x16\xad" - "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f" - "\xb6\x2a\xc1\x58\xef\x63\xfa\x91" - "\x05\x9c\x33\xca\x3e\xd5\x6c\x03" - "\x77\x0e\xa5\x19\xb0\x47\xde\x52" - "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4" - 
"\x5b\xf2\x66\xfd\x94\x08\x9f\x36" - "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8" - "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a" - "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69" - "\x00\x97\x0b\xa2\x39\xd0\x44\xdb" - "\x72\x09\x7d\x14\xab\x1f\xb6\x4d" - "\xe4\x58\xef\x86\x1d\x91\x28\xbf" - "\x33\xca\x61\xf8\x6c\x03\x9a\x0e" - "\xa5\x3c\xd3\x47\xde\x75\x0c\x80" - "\x17\xae\x22\xb9\x50\xe7\x5b\xf2" - "\x89\x20\x94\x2b\xc2\x36\xcd\x64" - "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6" - "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25" - "\xbc\x53\xea\x5e\xf5\x8c\x00\x97" - "\x2e\xc5\x39\xd0\x67\xfe\x72\x09" - "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b" - "\x12\x86\x1d\xb4\x28\xbf\x56\xed" - "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c" - "\xd3\x6a\x01\x75\x0c\xa3\x17\xae" - "\x45\xdc\x50\xe7\x7e\x15\x89\x20" - "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92" - "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04" - "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53" - "\xea\x81\x18\x8c\x23\xba\x2e\xc5" - "\x5c\xf3\x67\xfe\x95\x09\xa0\x37" - "\xce\x42\xd9\x70\x07\x7b\x12\xa9" - "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b" - "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a" - "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc" - "\x73\x0a\x7e\x15\xac\x20\xb7\x4e" - "\xe5\x59\xf0\x87\x1e\x92\x29\xc0" - "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f" - "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81" - "\x18\xaf\x23\xba\x51\xe8\x5c\xf3" - "\x8a\x21\x95\x2c\xc3\x37\xce\x65" - "\xfc\x70\x07\x9e\x12\xa9\x40\xd7" - "\x4b\xe2\x79\x10\x84\x1b\xb2\x26" - "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98" - "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a" - "\xa1\x15\xac\x43\xda\x4e\xe5\x7c" - "\x13\x87\x1e\xb5\x29\xc0\x57\xee" - "\x62\xf9\x90\x04\x9b\x32\xc9\x3d" - "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf" - "\x46\xdd\x51\xe8\x7f\x16\x8a\x21" - "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93" - "\x07\x9e\x35\xcc\x40\xd7\x6e\x05" - "\x79\x10\xa7\x1b\xb2\x49\xe0\x54" - "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6" - "\x5d\xf4\x68\xff\x96\x0a\xa1\x38" - "\xcf\x43\xda\x71\x08\x7c\x13\xaa" - "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c" - "\x90\x27\xbe\x32\xc9\x60\xf7\x6b" - "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd" - "\x74\x0b\x7f\x16\xad\x21\xb8\x4f" - "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1" - "\x35\xcc\x63\xfa\x6e\x05\x9c\x10" - "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82" - "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4" - "\x8b\x22\x96\x2d\xc4\x38\xcf\x66" - "\xfd\x71\x08\x9f\x13\xaa\x41\xd8" - "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27" - "\xbe\x55\xec\x60\xf7\x8e\x02\x99" - "\x30\xc7\x3b\xd2\x69\x00\x74\x0b" - "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d" - "\x14\x88\x1f\xb6\x2a\xc1\x58\xef" - "\x63\xfa\x91\x05\x9c\x33\xca\x3e" - "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0" - "\x47\xde\x52\xe9\x80\x17\x8b\x22" - "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94" - "\x08\x9f\x36\xcd\x41\xd8\x6f\x06" - "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55" - "\xec\x83\x1a\x8e\x25\xbc\x30\xc7" - "\x5e\xf5\x69\x00\x97\x0b\xa2\x39" - "\xd0\x44\xdb\x72\x09\x7d\x14\xab" - "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d" - "\x91\x28\xbf\x33\xca\x61\xf8\x6c" - "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde" - "\x75\x0c\x80\x17\xae\x22\xb9\x50" - "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2" - "\x36\xcd\x64\xfb\x6f\x06\x9d\x11" - "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83" - "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5" - "\x8c\x00\x97\x2e\xc5\x39\xd0\x67" - "\xfe\x72\x09\xa0\x14\xab\x42\xd9" - "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28" - "\xbf\x56\xed\x61\xf8\x8f\x03\x9a" - "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c" - "\xa3\x17\xae\x45\xdc\x50\xe7\x7e" - "\x15\x89\x20\xb7\x2b\xc2\x59\xf0" - "\x64\xfb\x92\x06\x9d\x34\xcb\x3f" - "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1" - "\x48\xdf\x53\xea\x81\x18\x8c\x23" - "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95" - "\x09\xa0\x37\xce\x42\xd9\x70\x07" - "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56" - "\xed\x84\x1b\x8f\x26\xbd\x31\xc8" - 
"\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a" - "\xd1\x45\xdc\x73\x0a\x7e\x15\xac" - "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e" - "\x92\x29\xc0\x34\xcb\x62\xf9\x6d" - "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf" - "\x76\x0d\x81\x18\xaf\x23\xba\x51" - "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3" - "\x37\xce\x65\xfc\x70\x07\x9e\x12" - "\xa9\x40\xd7\x4b\xe2\x79\x10\x84" - "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6" - "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68" - "\xff\x73\x0a\xa1\x15\xac\x43\xda" - "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29" - "\xc0\x57\xee\x62\xf9\x90\x04\x9b" - "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d" - "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f" - "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1" - "\x65\xfc\x93\x07\x9e\x35\xcc\x40" - "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2" - "\x49\xe0\x54\xeb\x82\x19\x8d\x24" - "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96" - "\x0a\xa1\x38\xcf\x43\xda\x71\x08" - "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57" - "\xee\x85\x1c\x90\x27\xbe\x32\xc9" - "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b" - "\xd2\x46\xdd\x74\x0b\x7f\x16\xad" - "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f" - "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e" - "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0" - "\x77\x0e\x82\x19\xb0\x24\xbb\x52" - "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4" - "\x38\xcf\x66\xfd\x71\x08\x9f\x13" - "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85" - "\x1c\xb3\x27\xbe\x55\xec\x60\xf7" - "\x8e\x02\x99\x30\xc7\x3b\xd2\x69" - "\x00\x74\x0b\xa2\x16\xad\x44\xdb" - "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a" - "\xc1\x58\xef\x63\xfa\x91\x05\x9c" - "\x33\xca\x3e\xd5\x6c\x03\x77\x0e" - "\xa5\x19\xb0\x47\xde\x52\xe9\x80" - "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2" - "\x66\xfd\x94\x08\x9f\x36\xcd\x41" - "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3" - "\x4a\xe1\x55\xec\x83\x1a\x8e\x25" - "\xbc\x30\xc7\x5e\xf5\x69\x00\x97" - "\x0b\xa2\x39\xd0\x44\xdb\x72\x09" - "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58" - "\xef\x86\x1d\x91\x28\xbf\x33\xca" - "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c" - "\xd3\x47\xde\x75\x0c\x80\x17\xae" - "\x22\xb9\x50\xe7\x5b\xf2\x89\x20" - "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f" - "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1" - "\x78\x0f\x83\x1a\xb1\x25\xbc\x53" - "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5" - "\x39\xd0\x67\xfe\x72\x09\xa0\x14" - "\xab\x42\xd9\x4d\xe4\x7b\x12\x86" - "\x1d\xb4\x28\xbf\x56\xed\x61\xf8" - "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a" - "\x01\x75\x0c\xa3\x17\xae\x45\xdc" - "\x50\xe7\x7e\x15\x89\x20\xb7\x2b" - "\xc2\x59\xf0\x64\xfb\x92\x06\x9d" - "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f" - "\xa6\x1a\xb1\x48\xdf\x53\xea\x81" - "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3" - "\x67\xfe\x95\x09\xa0\x37\xce\x42" - "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4" - "\x4b\xe2\x56\xed\x84\x1b\x8f\x26" - "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98", - .psize = 2048, - .digest = (u8 *)(u16 []){ 0x23ca }, - } -}; - /* * Streebog test vectors from RFC 6986 and GOST R 34.11-2012 */ @@ -4083,65 +4896,6 @@ static const struct hash_testvec hmac_streebog512_tv_template[] = { }, }; -/* - * SM2 test vectors. 
- */ -static const struct akcipher_testvec sm2_tv_template[] = { - { /* Generated from openssl */ - .key = - "\x04" - "\x8e\xa0\x33\x69\x91\x7e\x3d\xec\xad\x8e\xf0\x45\x5e\x13\x3e\x68" - "\x5b\x8c\xab\x5c\xc6\xc8\x50\xdf\x91\x00\xe0\x24\x73\x4d\x31\xf2" - "\x2e\xc0\xd5\x6b\xee\xda\x98\x93\xec\xd8\x36\xaa\xb9\xcf\x63\x82" - "\xef\xa7\x1a\x03\xed\x16\xba\x74\xb8\x8b\xf9\xe5\x70\x39\xa4\x70", - .key_len = 65, - .param_len = 0, - .c = - "\x30\x45" - "\x02\x20" - "\x70\xab\xb6\x7d\xd6\x54\x80\x64\x42\x7e\x2d\x05\x08\x36\xc9\x96" - "\x25\xc2\xbb\xff\x08\xe5\x43\x15\x5e\xf3\x06\xd9\x2b\x2f\x0a\x9f" - "\x02\x21" - "\x00" - "\xbf\x21\x5f\x7e\x5d\x3f\x1a\x4d\x8f\x84\xc2\xe9\xa6\x4c\xa4\x18" - "\xb2\xb8\x46\xf4\x32\x96\xfa\x57\xc6\x29\xd4\x89\xae\xcc\xda\xdb", - .c_size = 71, - .algo = OID_SM2_with_SM3, - .m = - "\x47\xa7\xbf\xd3\xda\xc4\x79\xee\xda\x8b\x4f\xe8\x40\x94\xd4\x32" - "\x8f\xf1\xcd\x68\x4d\xbd\x9b\x1d\xe0\xd8\x9a\x5d\xad\x85\x47\x5c", - .m_size = 32, - .public_key_vec = true, - .siggen_sigver_test = true, - }, - { /* From libgcrypt */ - .key = - "\x04" - "\x87\x59\x38\x9a\x34\xaa\xad\x07\xec\xf4\xe0\xc8\xc2\x65\x0a\x44" - "\x59\xc8\xd9\x26\xee\x23\x78\x32\x4e\x02\x61\xc5\x25\x38\xcb\x47" - "\x75\x28\x10\x6b\x1e\x0b\x7c\x8d\xd5\xff\x29\xa9\xc8\x6a\x89\x06" - "\x56\x56\xeb\x33\x15\x4b\xc0\x55\x60\x91\xef\x8a\xc9\xd1\x7d\x78", - .key_len = 65, - .param_len = 0, - .c = - "\x30\x44" - "\x02\x20" - "\xd9\xec\xef\xe8\x5f\xee\x3c\x59\x57\x8e\x5b\xab\xb3\x02\xe1\x42" - "\x4b\x67\x2c\x0b\x26\xb6\x51\x2c\x3e\xfc\xc6\x49\xec\xfe\x89\xe5" - "\x02\x20" - "\x43\x45\xd0\xa5\xff\xe5\x13\x27\x26\xd0\xec\x37\xad\x24\x1e\x9a" - "\x71\x9a\xa4\x89\xb0\x7e\x0f\xc4\xbb\x2d\x50\xd0\xe5\x7f\x7a\x68", - .c_size = 70, - .algo = OID_SM2_with_SM3, - .m = - "\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff\x00" - "\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0", - .m_size = 32, - .public_key_vec = true, - .siggen_sigver_test = true, - }, -}; - /* Example vectors below taken from * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf * @@ -5713,6 +6467,7 @@ static const struct hash_testvec hmac_sha1_tv_template[] = { .psize = 28, .digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74" "\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 20, @@ -5802,6 +6557,7 @@ static const struct hash_testvec hmac_sha224_tv_template[] = { "\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f" "\x8b\xbe\xa2\xa3\x9e\x61\x48\x00" "\x8f\xd0\x5e\x44", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5945,6 +6701,7 @@ static const struct hash_testvec hmac_sha256_tv_template[] = { "\x6a\x04\x24\x26\x08\x95\x75\xc7" "\x5a\x00\x3f\x08\x9d\x27\x39\x83" "\x9d\xec\x58\xb9\x64\xec\x38\x43", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6261,159 +7018,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { } }; -static const char vmac64_string1[144] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02', - '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03', -}; - -static const char vmac64_string2[144] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'a', 'b', 'c', -}; - -static const char 
vmac64_string3[144] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', - 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', - 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', - 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', - 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', -}; - -static const char vmac64_string4[33] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm', - 'o', 'p', 'r', 's', 't', 'u', 'w', 'x', - 'z', -}; - -static const char vmac64_string5[143] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k', - ']', '%', '9', '2', '7', '!', 'A', -}; - -static const char vmac64_string6[145] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'p', 't', '*', '7', 'l', 'i', '!', '#', - 'w', '0', 'z', '/', '4', 'A', 'n', -}; - -static const struct hash_testvec vmac64_aes_tv_template[] = { - { /* draft-krovetz-vmac-01 test vector 1 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghi", - .psize = 16, - .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b", - }, { /* draft-krovetz-vmac-01 test vector 2 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc", - .psize = 19, - .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5", - }, { /* draft-krovetz-vmac-01 test vector 3 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghi" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", - .psize = 64, - .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98", - }, { /* draft-krovetz-vmac-01 test vector 4 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghi" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", - .psize = 316, - .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 16, - .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = vmac64_string1, - .psize = sizeof(vmac64_string1), - .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = vmac64_string2, - .psize = sizeof(vmac64_string2), - .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = vmac64_string3, - .psize = sizeof(vmac64_string3), - .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 16, - .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = vmac64_string1, - .psize = sizeof(vmac64_string1), - 
.digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = vmac64_string2, - .psize = sizeof(vmac64_string2), - .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = vmac64_string3, - .psize = sizeof(vmac64_string3), - .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b", - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .ksize = 16, - .plaintext = vmac64_string4, - .psize = sizeof(vmac64_string4), - .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab", - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .ksize = 16, - .plaintext = vmac64_string5, - .psize = sizeof(vmac64_string5), - .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25", - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .ksize = 16, - .plaintext = vmac64_string6, - .psize = sizeof(vmac64_string6), - .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4", - }, -}; - /* * SHA384 HMAC test vectors from RFC4231 */ @@ -6443,6 +7047,7 @@ static const struct hash_testvec hmac_sha384_tv_template[] = { "\xe4\x2e\xc3\x73\x63\x22\x44\x5e" "\x8e\x22\x40\xca\x5e\x69\xe2\xc7" "\x8b\x32\x39\xec\xfa\xb2\x16\x49", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6543,6 +7148,7 @@ static const struct hash_testvec hmac_sha512_tv_template[] = { "\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd" "\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b" "\x63\x6e\x07\x0a\x38\xbc\xe7\x37", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6638,6 +7244,7 @@ static const struct hash_testvec hmac_sha3_224_tv_template[] = { "\x1b\x79\x86\x34\xad\x38\x68\x11" "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b" "\xba\xce\x5e\x66", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6725,6 +7332,7 @@ static const struct hash_testvec hmac_sha3_256_tv_template[] = { "\x35\x96\xbb\xb0\xda\x73\xb8\x87" "\xc9\x17\x1f\x93\x09\x5b\x29\x4a" "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6816,6 +7424,7 @@ static const struct hash_testvec hmac_sha3_384_tv_template[] = { "\x3c\xa1\x35\x08\xa9\x32\x43\xce" "\x48\xc0\x45\xdc\x00\x7f\x26\xa2" "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6915,6 +7524,7 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = { "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e" "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83" "\x96\x02\x75\xbe\xb4\xe6\x20\x24", + .fips_skip = 1, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -6989,294 +7599,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = { }, }; -/* - * Poly1305 test vectors from RFC7539 A.3. 
- */ - -static const struct hash_testvec poly1305_tv_template[] = { - { /* Test Vector #1 */ - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 96, - .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #2 */ - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e" - "\x41\x6e\x79\x20\x73\x75\x62\x6d" - "\x69\x73\x73\x69\x6f\x6e\x20\x74" - "\x6f\x20\x74\x68\x65\x20\x49\x45" - "\x54\x46\x20\x69\x6e\x74\x65\x6e" - "\x64\x65\x64\x20\x62\x79\x20\x74" - "\x68\x65\x20\x43\x6f\x6e\x74\x72" - "\x69\x62\x75\x74\x6f\x72\x20\x66" - "\x6f\x72\x20\x70\x75\x62\x6c\x69" - "\x63\x61\x74\x69\x6f\x6e\x20\x61" - "\x73\x20\x61\x6c\x6c\x20\x6f\x72" - "\x20\x70\x61\x72\x74\x20\x6f\x66" - "\x20\x61\x6e\x20\x49\x45\x54\x46" - "\x20\x49\x6e\x74\x65\x72\x6e\x65" - "\x74\x2d\x44\x72\x61\x66\x74\x20" - "\x6f\x72\x20\x52\x46\x43\x20\x61" - "\x6e\x64\x20\x61\x6e\x79\x20\x73" - "\x74\x61\x74\x65\x6d\x65\x6e\x74" - "\x20\x6d\x61\x64\x65\x20\x77\x69" - "\x74\x68\x69\x6e\x20\x74\x68\x65" - "\x20\x63\x6f\x6e\x74\x65\x78\x74" - "\x20\x6f\x66\x20\x61\x6e\x20\x49" - "\x45\x54\x46\x20\x61\x63\x74\x69" - "\x76\x69\x74\x79\x20\x69\x73\x20" - "\x63\x6f\x6e\x73\x69\x64\x65\x72" - "\x65\x64\x20\x61\x6e\x20\x22\x49" - "\x45\x54\x46\x20\x43\x6f\x6e\x74" - "\x72\x69\x62\x75\x74\x69\x6f\x6e" - "\x22\x2e\x20\x53\x75\x63\x68\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x63\x6c\x75" - "\x64\x65\x20\x6f\x72\x61\x6c\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x20\x49\x45" - "\x54\x46\x20\x73\x65\x73\x73\x69" - "\x6f\x6e\x73\x2c\x20\x61\x73\x20" - "\x77\x65\x6c\x6c\x20\x61\x73\x20" - "\x77\x72\x69\x74\x74\x65\x6e\x20" - "\x61\x6e\x64\x20\x65\x6c\x65\x63" - "\x74\x72\x6f\x6e\x69\x63\x20\x63" - "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" - "\x74\x69\x6f\x6e\x73\x20\x6d\x61" - "\x64\x65\x20\x61\x74\x20\x61\x6e" - "\x79\x20\x74\x69\x6d\x65\x20\x6f" - "\x72\x20\x70\x6c\x61\x63\x65\x2c" - "\x20\x77\x68\x69\x63\x68\x20\x61" - "\x72\x65\x20\x61\x64\x64\x72\x65" - "\x73\x73\x65\x64\x20\x74\x6f", - .psize = 407, - .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e", - }, { /* Test Vector #3 */ - .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x41\x6e\x79\x20\x73\x75\x62\x6d" - "\x69\x73\x73\x69\x6f\x6e\x20\x74" - "\x6f\x20\x74\x68\x65\x20\x49\x45" - "\x54\x46\x20\x69\x6e\x74\x65\x6e" - "\x64\x65\x64\x20\x62\x79\x20\x74" - "\x68\x65\x20\x43\x6f\x6e\x74\x72" - "\x69\x62\x75\x74\x6f\x72\x20\x66" - "\x6f\x72\x20\x70\x75\x62\x6c\x69" - "\x63\x61\x74\x69\x6f\x6e\x20\x61" - "\x73\x20\x61\x6c\x6c\x20\x6f\x72" - "\x20\x70\x61\x72\x74\x20\x6f\x66" - "\x20\x61\x6e\x20\x49\x45\x54\x46" - "\x20\x49\x6e\x74\x65\x72\x6e\x65" - "\x74\x2d\x44\x72\x61\x66\x74\x20" - "\x6f\x72\x20\x52\x46\x43\x20\x61" - "\x6e\x64\x20\x61\x6e\x79\x20\x73" - "\x74\x61\x74\x65\x6d\x65\x6e\x74" - "\x20\x6d\x61\x64\x65\x20\x77\x69" - 
"\x74\x68\x69\x6e\x20\x74\x68\x65" - "\x20\x63\x6f\x6e\x74\x65\x78\x74" - "\x20\x6f\x66\x20\x61\x6e\x20\x49" - "\x45\x54\x46\x20\x61\x63\x74\x69" - "\x76\x69\x74\x79\x20\x69\x73\x20" - "\x63\x6f\x6e\x73\x69\x64\x65\x72" - "\x65\x64\x20\x61\x6e\x20\x22\x49" - "\x45\x54\x46\x20\x43\x6f\x6e\x74" - "\x72\x69\x62\x75\x74\x69\x6f\x6e" - "\x22\x2e\x20\x53\x75\x63\x68\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x63\x6c\x75" - "\x64\x65\x20\x6f\x72\x61\x6c\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x20\x49\x45" - "\x54\x46\x20\x73\x65\x73\x73\x69" - "\x6f\x6e\x73\x2c\x20\x61\x73\x20" - "\x77\x65\x6c\x6c\x20\x61\x73\x20" - "\x77\x72\x69\x74\x74\x65\x6e\x20" - "\x61\x6e\x64\x20\x65\x6c\x65\x63" - "\x74\x72\x6f\x6e\x69\x63\x20\x63" - "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" - "\x74\x69\x6f\x6e\x73\x20\x6d\x61" - "\x64\x65\x20\x61\x74\x20\x61\x6e" - "\x79\x20\x74\x69\x6d\x65\x20\x6f" - "\x72\x20\x70\x6c\x61\x63\x65\x2c" - "\x20\x77\x68\x69\x63\x68\x20\x61" - "\x72\x65\x20\x61\x64\x64\x72\x65" - "\x73\x73\x65\x64\x20\x74\x6f", - .psize = 407, - .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf" - "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0", - }, { /* Test Vector #4 */ - .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" - "\x27\x54\x77\x61\x73\x20\x62\x72" - "\x69\x6c\x6c\x69\x67\x2c\x20\x61" - "\x6e\x64\x20\x74\x68\x65\x20\x73" - "\x6c\x69\x74\x68\x79\x20\x74\x6f" - "\x76\x65\x73\x0a\x44\x69\x64\x20" - "\x67\x79\x72\x65\x20\x61\x6e\x64" - "\x20\x67\x69\x6d\x62\x6c\x65\x20" - "\x69\x6e\x20\x74\x68\x65\x20\x77" - "\x61\x62\x65\x3a\x0a\x41\x6c\x6c" - "\x20\x6d\x69\x6d\x73\x79\x20\x77" - "\x65\x72\x65\x20\x74\x68\x65\x20" - "\x62\x6f\x72\x6f\x67\x6f\x76\x65" - "\x73\x2c\x0a\x41\x6e\x64\x20\x74" - "\x68\x65\x20\x6d\x6f\x6d\x65\x20" - "\x72\x61\x74\x68\x73\x20\x6f\x75" - "\x74\x67\x72\x61\x62\x65\x2e", - .psize = 159, - .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61" - "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62", - }, { /* Test Vector #5 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .psize = 48, - .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #6 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 48, - .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #7 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xf0\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\x11\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 80, - .digest = "\x05\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #8 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - 
"\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe" - "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .psize = 80, - .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #9 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xfd\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .psize = 48, - .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - }, { /* Test Vector #10 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x04\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x33\x94\xd7\x50\x5e\x43\x79\xcd" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 96, - .digest = "\x14\x00\x00\x00\x00\x00\x00\x00" - "\x55\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #11 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x04\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x33\x94\xd7\x50\x5e\x43\x79\xcd" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 80, - .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Regression test for overflow in AVX2 implementation */ - .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff", - .psize = 300, - .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8" - "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1", - } -}; - /* NHPoly1305 test vectors from https://github.com/google/adiantum */ static const struct hash_testvec nhpoly1305_tv_template[] = { { @@ -8689,6 +9011,126 @@ static const struct cipher_testvec des_tv_template[] = { .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", 
.ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", .len = 8, + }, { /* Weak key */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xe0\xe0\xe0\xe0\xf1\xf1\xf1\xf1", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Weak key */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Weak key */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 1a */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x01\xfe\x01\xfe\x01\xfe\x01\xfe", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 1b */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xfe\x01\xfe\x01\xfe\x01\xfe\x01", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 2a */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x1f\xe0\x1f\xe0\x0e\xf1\x0e\xf1", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 2b */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xe0\x1f\xe0\x1f\xf1\x0e\xf1\x0e", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 3a */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x01\xe0\x01\xe0\x01\xf1\x01\xf1", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 3b */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xe0\x01\xe0\x01\xf1\x01\xf1\x01", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 4a */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x1f\xfe\x1f\xfe\x0e\xfe\x0e\xfe", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 4b */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xfe\x1f\xfe\x1f\xfe\x0e\xfe\x0e", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 5a */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x01\x1f\x01\x1f\x01\x0e\x01\x0e", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 5b */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\x1f\x01\x1f\x01\x0e\x01\x0e\x01", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 6a */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xe0\xfe\xe0\xfe\xf1\xfe\xf1\xfe", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, + }, { /* Semi-weak key pair 6b */ + .setkey_error = -EINVAL, + .wk = 1, + .key = "\xfe\xe0\xfe\xe0\xfe\xf1\xfe\xf1", + .klen = 8, + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", + .ctext = 
"\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", + .len = 8, }, { /* Two blocks -- for testing encryption across pages */ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, @@ -13230,101 +13672,1128 @@ static const struct cipher_testvec sm4_ctr_rfc3686_tv_template[] = { } }; -static const struct cipher_testvec sm4_ofb_tv_template[] = { - { /* From: draft-ribose-cfrg-sm4-02, paragraph 12.2.3 */ - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", +static const struct cipher_testvec sm4_cts_tv_template[] = { + /* Generated from AES-CTS test vectors */ + { .klen = 16, - .iv = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10" - "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .ctext = "\x69\x3d\x9a\x53\x5b\xad\x5b\xb1" - "\x78\x6f\x53\xd7\x25\x3a\x70\x56" - "\xf2\x07\x5d\x28\xb5\x23\x5f\x58" - "\xd5\x00\x27\xe4\x17\x7d\x2b\xce", - .len = 32, - }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.3, Example 1 */ - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20", + .len = 17, + .ctext = "\x05\xfe\x23\xee\x17\xa2\x89\x98" + "\xbc\x97\x0a\x0b\x54\x67\xca\xd7" + "\xd6", + }, { .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb" - "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd" - "\xee\xee\xee\xee\xff\xff\xff\xff" - "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", - .ctext = "\xac\x32\x36\xcb\x86\x1d\xd3\x16" - "\xe6\x41\x3b\x4e\x3c\x75\x24\xb7" - "\x1d\x01\xac\xa2\x48\x7c\xa5\x82" - "\xcb\xf5\x46\x3e\x66\x98\x53\x9b", + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20", + .len = 31, + .ctext = "\x15\x46\xe4\x95\xa4\xec\xf0\xb8" + "\x49\xd6\x6a\x9d\x89\xc7\xfd\x70" + "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3", + }, { + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43", .len = 32, - }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.3, Example 2 */ - .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10" - "\x01\x23\x45\x67\x89\xab\xcd\xef", + .ctext = "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3" + "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf", + }, { .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb" - "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd" - "\xee\xee\xee\xee\xff\xff\xff\xff" - "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", - .ctext = "\x5d\xcc\xcd\x25\xa8\x4b\xa1\x65" - "\x60\xd7\xf2\x65\x88\x70\x68\x49" - "\x33\xfa\x16\xbd\x5c\xd9\xc8\x56" - "\xca\xca\xa1\xe1\x01\x89\x7a\x97", + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43" + "\x68\x69\x63\x6b\x65\x6e\x2c\x20" + 
"\x70\x6c\x65\x61\x73\x65\x2c", + .len = 47, + .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf" + "\xd3\xe1\xdc\xeb\xfa\x04\x11\x99" + "\xde\xcf\x6f\x4d\x7b\x09\x92\x7f" + "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02", + }, { + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43" + "\x68\x69\x63\x6b\x65\x6e\x2c\x20" + "\x70\x6c\x65\x61\x73\x65\x2c\x20", + .len = 48, + .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf" + "\x9a\xbd\x7b\xfe\x82\xab\xcc\x7f" + "\xbd\x99\x21\x0c\x5e\x4d\xed\x20" + "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3", + }, { + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43" + "\x68\x69\x63\x6b\x65\x6e\x2c\x20" + "\x70\x6c\x65\x61\x73\x65\x2c\x20" + "\x61\x6e\x64\x20\x77\x6f\x6e\x74" + "\x6f\x6e\x20\x73\x6f\x75\x70\x2e", + .len = 64, + .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf" + "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3" + "\x58\x19\xa4\x8f\xa9\x68\x5e\x6b" + "\x2c\x0f\x81\x60\x15\x98\x27\x4f" + "\x9a\xbd\x7b\xfe\x82\xab\xcc\x7f" + "\xbd\x99\x21\x0c\x5e\x4d\xed\x20", + } +}; + +static const struct cipher_testvec sm4_xts_tv_template[] = { + /* Generated from AES-XTS test vectors */ + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ctext = "\xd9\xb4\x21\xf7\x31\xc8\x94\xfd" + "\xc3\x5b\x77\x29\x1f\xe4\xe3\xb0" + "\x2a\x1f\xb7\x66\x98\xd5\x9f\x0e" + "\x51\x37\x6c\x4a\xda\x5b\xc7\x5d", + .len = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ctext = "\xa7\x4d\x72\x6c\x11\x19\x6a\x32" + "\xbe\x04\xe0\x01\xff\x29\xd0\xc7" + "\x93\x2f\x9f\x3e\xc2\x9b\xfc\xb6" + "\x4d\xd1\x7f\x63\xcb\xd3\xea\x31", + .len = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ctext = "\x7f\x76\x08\x8e\xff\xad\xf7\x0c" + "\x02\xea\x9f\x95\xda\x06\x28\xd3" + "\x51\xbf\xcb\x9e\xac\x05\x63\xbc" + "\xf1\x7b\x71\x0d\xab\x0a\x98\x26", .len = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + 
"\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ctext = "\x54\xdd\x65\xb6\x32\x6f\xae\xa8" + "\xfa\xd1\xa8\x3c\x63\x61\x4a\xf3" + "\x9f\x72\x1d\x8d\xfe\x17\x7a\x30" + "\xb6\x6a\xbf\x6a\x44\x99\x80\xe1" + "\xcd\xbe\x06\xaf\xb7\x33\x36\xf3" + "\x7a\x4d\x39\xde\x96\x4a\x30\xd7" + "\xd0\x4a\x37\x99\x16\x9c\x60\x25" + "\x8f\x6b\x74\x8a\x61\x86\x1a\xa5" + "\xec\x92\xa2\xc1\x5b\x2b\x7c\x61" + "\x5a\x42\xab\xa4\x99\xbb\xd6\xb7" + "\x1d\xb9\xc7\x89\xb2\x18\x20\x89" + "\xa2\x5d\xd3\xdf\x80\x0e\xd1\x86" + "\x4d\x19\xf7\xed\x45\xfd\x17\xa9" + "\x48\x0b\x0f\xb8\x2d\x9b\x7f\xc3" + "\xed\x57\xe9\xa1\x14\x0e\xaa\x77" + "\x8d\xd2\xdd\x67\x9e\x3e\xdc\x3d" + "\xc4\xd5\x5c\x95\x0e\xbc\x53\x1d" + "\x95\x92\xf7\xc4\x63\x82\x56\xd5" + "\x65\x18\x29\x2a\x20\xaf\x98\xfd" + "\xd3\xa6\x36\x00\x35\x0a\x70\xab" + "\x5a\x40\xf4\xc2\x85\x03\x7c\xa0" + "\x1f\x25\x1f\x19\xec\xae\x03\x29" + "\xff\x77\xad\x88\xcd\x5a\x4c\xde" + "\xa2\xae\xab\xc2\x21\x48\xff\xbd" + "\x23\x9b\xd1\x05\x15\xbd\xe1\x13" + "\x1d\xec\x84\x04\xe4\x43\xdc\x76" + "\x31\x40\xd5\xf2\x2b\xf3\x3e\x0c" 
+ "\x68\x72\xd6\xb8\x1d\x63\x0f\x6f" + "\x00\xcd\xd0\x58\xfe\x80\xf9\xcb" + "\xfb\x77\x70\x7f\x93\xce\xe2\xca" + "\x92\xb9\x15\xb8\x30\x40\x27\xc1" + "\x90\xa8\x4e\x2d\x65\xe0\x18\xcc" + "\x6a\x38\x7d\x37\x66\xac\xdb\x28" + "\x25\x32\x84\xe8\xdb\x9a\xcf\x8f" + "\x52\x28\x0d\xdc\x6d\x00\x33\xd2" + "\xcc\xaa\xa4\xf9\xae\xff\x12\x36" + "\x69\xbc\x02\x4f\xd6\x76\x8e\xdf" + "\x8b\xc1\xf8\xd6\x22\xc1\x9c\x60" + "\x9e\xf9\x7f\x60\x91\x90\xcd\x11" + "\x02\x41\xe7\xfb\x08\x4e\xd8\x94" + "\x2d\xa1\xf9\xb9\xcf\x1b\x51\x4b" + "\x61\xa3\x88\xb3\x0e\xa6\x1a\x4a" + "\x74\x5b\x38\x1e\xe7\xad\x6c\x4d" + "\xb1\x27\x54\x53\xb8\x41\x3f\x98" + "\xdf\x6e\x4a\x40\x98\x6e\xe4\xb5" + "\x9a\xf5\xdf\xae\xcd\x30\x12\x65" + "\x17\x90\x67\xa0\x0d\x7c\xa3\x5a" + "\xb9\x5a\xbd\x61\x7a\xde\xa2\x8e" + "\xc1\xc2\x6a\x97\xde\x28\xb8\xbf" + "\xe3\x01\x20\xd6\xae\xfb\xd2\x58" + "\xc5\x9e\x42\xd1\x61\xe8\x06\x5a" + "\x78\x10\x6b\xdc\xa5\xcd\x90\xfb" + "\x3a\xac\x4e\x93\x86\x6c\x8a\x7f" + "\x96\x76\x86\x0a\x79\x14\x5b\xd9" + "\x2e\x02\xe8\x19\xa9\x0b\xe0\xb9" + "\x7c\xc5\x22\xb3\x21\x06\x85\x6f" + "\xdf\x0e\x54\xd8\x8e\x46\x24\x15" + "\x5a\x2f\x1c\x14\xea\xea\xa1\x63" + "\xf8\x58\xe9\x9a\x80\x6e\x79\x1a" + "\xcd\x82\xf1\xb0\xe2\x9f\x00\x28" + "\xa4\xc3\x8e\x97\x6f\x57\x1a\x93" + "\xf4\xfd\x57\xd7\x87\xc2\x4d\xb0" + "\xe0\x1c\xa3\x04\xe5\xa5\xc4\xdd" + "\x50\xcf\x8b\xdb\xf4\x91\xe5\x7c", + .len = 512, + }, { + .key = "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xf8\xf9\xfa\xfb\xfc", + .ctext = "\xa2\x9f\x9e\x4e\x71\xdb\x28\x3c" + "\x80\x0e\xf6\xb7\x8e\x57\x1c\xba" + "\x90\xda\x3b\x6c\x22\x00\x68\x30" + "\x1d\x63\x0d\x9e\x6a\xad\x37\x55" + "\xbc\x77\x1e\xc9\xad\x83\x30\xd5" + "\x27\xb2\x66\x77\x18\x3c\xa6\x39" + "\x9c\x0a\xaa\x1f\x02\xe1\xd5\x65" + "\x9b\x8d\xc5\x97\x3d\xc5\x04\x53" + "\x78\x00\xe3\xb0\x1a\x43\x4e\xb7" + "\xc4\x9f\x38\xc5\x7b\xa4\x70\x64" + "\x78\xe6\x32\xd9\x65\x44\xc5\x64" + "\xb8\x42\x35\x99\xff\x66\x75\xb0" + "\x22\xd3\x9b\x6e\x8d\xcf\x6a\x24" + "\xfd\x92\xb7\x1b\x04\x28\x2a\x61" + "\xdc\x96\x2a\x20\x7a\x2c\xf1\xf9" + "\x12\x15\xf0\x4d\xcf\x2b\xde\x33" + "\x41\xbc\xe7\x85\x87\x22\xb7\x16" + "\x02\x1c\xd8\xa2\x0f\x1f\xa3\xe9" + "\xd8\x45\x48\xe7\xbe\x08\x4e\x4e" + "\x23\x79\x84\xdb\x40\x76\xf5\x13" + "\x78\x92\x4a\x2f\xf9\x1b\xf2\x80" + "\x25\x74\x51\x45\x9a\x77\x78\x97" + "\xd3\xe0\xc7\xc4\x35\x67\x2a\xe6" + "\xb3\x0d\x62\x9f\x8b", + .len = 189, + }, +}; + +static const struct aead_testvec sm4_gcm_tv_template[] = { + { /* From 
https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.1 */ + .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" + "\xFE\xDC\xBA\x98\x76\x54\x32\x10", + .klen = 16, + .iv = "\x00\x00\x12\x34\x56\x78\x00\x00" + "\x00\x00\xAB\xCD", + .ptext = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA" + "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB" + "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC" + "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD" + "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE" + "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" + "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE" + "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA", + .plen = 64, + .assoc = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF" + "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF" + "\xAB\xAD\xDA\xD2", + .alen = 20, + .ctext = "\x17\xF3\x99\xF0\x8C\x67\xD5\xEE" + "\x19\xD0\xDC\x99\x69\xC4\xBB\x7D" + "\x5F\xD4\x6F\xD3\x75\x64\x89\x06" + "\x91\x57\xB2\x82\xBB\x20\x07\x35" + "\xD8\x27\x10\xCA\x5C\x22\xF0\xCC" + "\xFA\x7C\xBF\x93\xD4\x96\xAC\x15" + "\xA5\x68\x34\xCB\xCF\x98\xC3\x97" + "\xB4\x02\x4A\x26\x91\x23\x3B\x8D" + "\x83\xDE\x35\x41\xE4\xC2\xB5\x81" + "\x77\xE0\x65\xA9\xBF\x7B\x62\xEC", + .clen = 80, + }, { /* Generated from AES-GCM test vectors */ + .key = zeroed_string, + .klen = 16, + .ctext = "\x23\x2f\x0c\xfe\x30\x8b\x49\xea" + "\x6f\xc8\x82\x29\xb5\xdc\x85\x8d", + .clen = 16, + }, { + .key = zeroed_string, + .klen = 16, + .ptext = zeroed_string, + .plen = 16, + .ctext = "\x7d\xe2\xaa\x7f\x11\x10\x18\x82" + "\x18\x06\x3b\xe1\xbf\xeb\x6d\x89" + "\xb8\x51\xb5\xf3\x94\x93\x75\x2b" + "\xe5\x08\xf1\xbb\x44\x82\xc5\x57", + .clen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08", + .klen = 16, + .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" + "\xde\xca\xf8\x88", + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" + "\x86\xa7\xa9\x53\x15\x34\xf7\xda" + "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" + "\x1c\x3c\x0c\x95\x95\x68\x09\x53" + "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" + "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" + "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", + .plen = 64, + .ctext = "\xe4\x11\x0f\xf1\xc1\x41\x97\xe6" + "\x76\x21\x6a\x33\x83\x10\x41\xeb" + "\x09\x58\x00\x11\x7b\xdc\x3f\x75" + "\x1a\x49\x6e\xfc\xf2\xbb\xdf\xdb" + "\x3a\x2e\x13\xfd\xc5\xc1\x9d\x07" + "\x1a\xe5\x48\x3f\xed\xde\x98\x5d" + "\x3f\x2d\x5b\x4e\xee\x0b\xb6\xdf" + "\xe3\x63\x36\x83\x23\xf7\x5b\x80" + "\x7d\xfe\x77\xef\x71\xb1\x5e\xc9" + "\x52\x6b\x09\xab\x84\x28\x4b\x8a", + .clen = 80, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08", + .klen = 16, + .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" + "\xde\xca\xf8\x88", + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" + "\x86\xa7\xa9\x53\x15\x34\xf7\xda" + "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" + "\x1c\x3c\x0c\x95\x95\x68\x09\x53" + "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" + "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" + "\xba\x63\x7b\x39", + .plen = 60, + .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xab\xad\xda\xd2", + .alen = 20, + .ctext = "\xe4\x11\x0f\xf1\xc1\x41\x97\xe6" + "\x76\x21\x6a\x33\x83\x10\x41\xeb" + "\x09\x58\x00\x11\x7b\xdc\x3f\x75" + "\x1a\x49\x6e\xfc\xf2\xbb\xdf\xdb" + "\x3a\x2e\x13\xfd\xc5\xc1\x9d\x07" + "\x1a\xe5\x48\x3f\xed\xde\x98\x5d" + "\x3f\x2d\x5b\x4e\xee\x0b\xb6\xdf" + "\xe3\x63\x36\x83" + "\x89\xf6\xba\x35\xb8\x18\xd3\xcc" + "\x38\x6c\x05\xb3\x8a\xcb\xc9\xde", + .clen = 76, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\xfe\xff\xe9\x92\x86\x65\x73\x1c", + .klen = 16, + .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" + "\xde\xca\xf8\x88", + .ptext = 
"\xd9\x31\x32\x25\xf8\x84\x06\xe5" + "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" + "\x86\xa7\xa9\x53\x15\x34\xf7\xda" + "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" + "\x1c\x3c\x0c\x95\x95\x68\x09\x53" + "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" + "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" + "\xba\x63\x7b\x39", + .plen = 60, + .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xab\xad\xda\xd2", + .alen = 20, + .ctext = "\xc1\x11\x44\x51\xd9\x25\x87\x5b" + "\x0f\xd9\x06\xf3\x33\x44\xbb\x87" + "\x8b\xa3\x77\xd2\x0c\x60\xfa\xcc" + "\x85\x50\x6f\x96\x0c\x54\x54\xc1" + "\x58\x04\x88\x6e\xf4\x26\x35\x7e" + "\x94\x80\x48\x6c\xf2\xf4\x88\x1f" + "\x19\x63\xea\xae\xba\x81\x1a\x5d" + "\x0e\x6f\x59\x08" + "\x33\xac\x5b\xa8\x19\x60\xdb\x1d" + "\xdd\x2e\x22\x2e\xe0\x87\x51\x5d", + .clen = 76, + }, { + .key = "\x8b\x32\xcf\xe7\x44\xed\x13\x59" + "\x04\x38\x77\xb0\xb9\xad\xb4\x38", + .klen = 16, + .iv = "\x00\xff\xff\xff\xff\x00\x00\xff" + "\xff\xff\x00\xff", + .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f" + "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0" + "\x58\x83\xf0\xc3\x70\x14\xc0\x5b" + "\x3f\xec\x1d\x25\x3c\x51\xd2\x03" + "\xcf\x59\x74\x1f\xb2\x85\xb4\x07" + "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb" + "\xaf\x08\x44\xbd\x6f\x91\x15\xe1" + "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50" + "\x59\xa9\x97\xab\xbb\x0e\x74\x5c" + "\x00\xa4\x43\x54\x04\x54\x9b\x3b" + "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08" + "\xae\xe6\x10\x3f\x32\x65\xd1\xfc" + "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3" + "\x35\x23\xf4\x20\x41\xd4\xad\x82" + "\x8b\xa4\xad\x96\x1c\x20\x53\xbe" + "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72" + "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7" + "\xad\x49\x3a\xae\x98\xce\xa6\x66" + "\x10\x30\x90\x8c\x55\x83\xd7\x7c" + "\x8b\xe6\x53\xde\xd2\x6e\x18\x21" + "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73" + "\x57\xcc\x89\x09\x75\x9b\x78\x70" + "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5" + "\xfa\x70\x04\x70\xc6\x96\x1c\x7d" + "\x54\x41\x77\xa8\xe3\xb0\x7e\x96" + "\x82\xd9\xec\xa2\x87\x68\x55\xf9" + "\x8f\x9e\x73\x43\x47\x6a\x08\x36" + "\x93\x67\xa8\x2d\xde\xac\x41\xa9" + "\x5c\x4d\x73\x97\x0f\x70\x68\xfa" + "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9" + "\x78\x1f\x51\x07\xe3\x9a\x13\x4e" + "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7" + "\xab\x19\x37\xd9\xba\x76\x5e\xd2" + "\xf2\x53\x15\x17\x4c\x6b\x16\x9f" + "\x02\x66\x49\xca\x7c\x91\x05\xf2" + "\x45\x36\x1e\xf5\x77\xad\x1f\x46" + "\xa8\x13\xfb\x63\xb6\x08\x99\x63" + "\x82\xa2\xed\xb3\xac\xdf\x43\x19" + "\x45\xea\x78\x73\xd9\xb7\x39\x11" + "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81" + "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79" + "\xa4\x47\x7d\x80\x20\x26\xfd\x63" + "\x0a\xc7\x7e\x6d\x75\x47\xff\x76" + "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b" + "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1" + "\x54\x03\xa4\x09\x0c\x37\x7a\x15" + "\x23\x27\x5b\x8b\x4b\xa5\x64\x97" + "\xae\x4a\x50\x73\x1f\x66\x1c\x5c" + "\x03\x25\x3c\x8d\x48\x58\x71\x34" + "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5" + "\xb6\x19\x2b\x84\x2a\x20\xd1\xea" + "\x80\x6f\x96\x0e\x05\x62\xc7\x78" + "\x87\x79\x60\x38\x46\xb4\x25\x57" + "\x6e\x16\x63\xf8\xad\x6e\xd7\x42" + "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a" + "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22" + "\x86\x5c\x74\x3a\xeb\x24\x26\xc7" + "\x09\xfc\x91\x96\x47\x87\x4f\x1a" + "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24" + "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a" + "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5" + "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb" + "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe" + "\x0b\x63\xde\x87\x42\x79\x8a\x68" + "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f" + "\x9d\xd1\xc7\x45\x90\x08\xc9\x83" + "\xe9\x83\x84\xcb\x28\x69\x09\x69" + "\xce\x99\x46\x00\x54\xcb\xd8\x38" + "\xf9\x53\x4a\xbf\x31\xce\x57\x15" + 
"\x33\xfa\x96\x04\x33\x42\xe3\xc0" + "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6" + "\x19\x95\xd0\x0e\x82\x07\x63\xf9" + "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9" + "\xb5\x9f\x23\x28\x60\xe7\x20\x51" + "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2" + "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb" + "\x78\xc6\x91\x22\x40\x91\x80\xbe" + "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9" + "\x67\x10\xa4\x83\x98\x79\x23\xe7" + "\x92\xda\xa9\x22\x16\xb1\xe7\x78" + "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37" + "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9" + "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d" + "\x48\x11\x06\xbb\x2d\xf2\x63\x88" + "\x3f\x73\x09\xe2\x45\x56\x31\x51" + "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9" + "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66" + "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23" + "\x59\xfa\xfa\xaa\x44\x04\x01\xa7" + "\xa4\x78\xdb\x74\x3d\x8b\xb5", + .plen = 719, + .ctext = "\xdc\xb1\x0f\x2a\xe8\x2d\x1c\x57" + "\xc4\x82\xfa\xd6\x87\xe6\x2f\x50" + "\xbd\x9e\x0a\x42\x31\xf2\xc7\xbb" + "\x21\x63\xa7\x05\x43\x33\xef\x33" + "\x5c\xd3\x47\x55\xce\x5c\xe4\xd4" + "\xe5\x07\x62\x22\xac\x01\xa8\x35" + "\x9c\x59\x34\x30\x8e\xff\x9f\xb4" + "\xd2\x4e\x74\x90\x64\xf2\x78\x5e" + "\x63\xb7\xc5\x08\x1b\x37\xa5\x9e" + "\xc0\xde\xff\xa9\x7f\x0b\xd3\x02" + "\x83\x6e\x33\xfa\x43\x11\xd3\xda" + "\x02\xcf\xcd\x4a\xc0\x78\x1f\x39" + "\x62\xcb\xa3\x95\x7e\x13\x92\x28" + "\xb2\xc4\x7a\xba\xd1\xc6\xf6\x1f" + "\xda\x0b\xf1\xd1\x99\x54\xd8\x3b" + "\x16\xf8\xe6\x97\x1e\xa7\xcf\x49" + "\x69\x84\x01\x4c\xdc\x7a\x34\xff" + "\x01\x08\xa3\x0b\x39\xac\x21\x37" + "\xd8\xb4\x04\x19\x8b\x7a\x7d\x17" + "\x44\xd1\x18\xaf\x1f\xa9\x29\xfe" + "\xfa\x77\xe0\x40\x42\x0c\x79\xb7" + "\xc3\x15\x1b\xd9\x0c\x82\xfc\x16" + "\x70\xd6\x2a\xe9\x94\x72\xc5\xa5" + "\x8a\x58\xbc\xfa\xe0\x88\x39\x4a" + "\x80\xe8\xec\xaf\x60\xac\xe7\xf8" + "\x9c\xf0\xfc\x61\x39\x07\x98\x6b" + "\x88\xe3\x98\x22\x28\x18\x4a\x2d" + "\x25\xef\x10\xe3\x83\x66\x3f\xfd" + "\xc7\x0b\xa3\xfd\x97\xa9\xf4\xbd" + "\xd8\x2a\xee\x4a\x50\xad\xcc\xb5" + "\xc7\xab\xb8\x79\x9c\xd1\xf1\x27" + "\x08\xf5\xf5\xe8\x1b\x66\xce\x41" + "\x56\x60\x94\x86\xf0\x78\xc2\xfa" + "\x5b\x63\x40\xb1\xd1\x1a\x38\x69" + "\x0b\x8c\xb2\xf5\xa2\xbe\x90\x9d" + "\x46\x23\x79\x8b\x3b\x4a\xf4\xbb" + "\x55\xf7\x58\x9d\xaf\x59\xff\x74" + "\xf3\xb9\xc4\x26\xb1\xf8\xe1\x28" + "\x8b\x5e\x8f\x6d\x64\xe7\xe8\x63" + "\xd2\x9e\xcb\xee\xae\x19\x04\x1d" + "\x05\xf0\x9d\x99\x7b\x33\x33\xae" + "\x6e\xe5\x09\xdd\x67\x51\xc4\xc8" + "\x6a\xc7\x36\x35\xc9\x93\x76\xa1" + "\xa8\x1c\xfa\x75\x92\x34\x0e\x7d" + "\x3d\x1d\xef\x00\xfd\xa5\x25\x12" + "\x7c\x91\x21\x41\xcc\x50\x47\xa9" + "\x22\x50\x24\x96\x34\x79\x3d\xe8" + "\x3f\xa0\x56\xaf\x98\x53\x55\xc3" + "\x46\x1b\x17\x54\xb8\xb0\xb7\xe0" + "\xe0\xab\x47\x6f\x06\xda\xcc\x75" + "\xa7\x96\xb7\x92\xf3\xa0\x5f\xe6" + "\xba\x97\xe3\x2f\x97\x05\xb2\x99" + "\xa0\x09\x10\x98\x9c\xd3\x2e\xd1" + "\x7e\x2a\x30\x54\x3c\xb9\x33\xe3" + "\xf2\xaf\xd3\xa5\xee\xd0\x0b\x8a" + "\x19\x54\x0f\x02\x51\x1f\x91\xdf" + "\x71\x9c\xad\x77\x35\x28\x55\x6d" + "\xcd\x7a\xd9\xa3\x41\x98\x6b\x37" + "\x19\x0f\xbe\xae\x69\xb2\x25\x01" + "\xee\x0e\x51\x4b\x53\xea\x0f\x5f" + "\x85\x74\x79\x36\x32\x0a\x2a\x40" + "\xad\x6b\x78\x41\x54\x99\xe9\xc1" + "\x2b\x6c\x9b\x42\x21\xef\xe2\x50" + "\x56\x8d\x78\xdf\x58\xbe\x0a\x0f" + "\xfc\xfc\x0d\x2e\xd0\xcb\xa6\x0a" + "\xa8\xd9\x1e\xa9\xd4\x7c\x99\x88" + "\xcf\x11\xad\x1c\xd3\x04\x63\x55" + "\xef\x85\x0b\x69\xa1\x40\xf1\x75" + "\x24\xf4\xe5\x2c\xd4\x7a\x24\x50" + "\x8f\xa2\x71\xc9\x92\x20\xcd\xcf" + "\xda\x40\xbe\xf6\xfe\x1a\xca\xc7" + "\x4a\x80\x45\x55\xcb\xdd\xb7\x01" + "\xb0\x8d\xcb\xd2\xae\xbd\xa4\xd0" + "\x5c\x10\x05\x66\x7b\xd4\xff\xd9" + 
"\xc4\x23\x9d\x8d\x6b\x24\xf8\x3f" + "\x73\x4d\x5c\x2b\x33\x4c\x5e\x63" + "\x74\x6d\x03\xa1\x7a\x35\x65\x17" + "\x38\x7f\x3b\xc1\x69\xcf\x61\x34" + "\x30\x21\xaf\x97\x47\x12\x3f\xa1" + "\xa7\x50\xc5\x87\xfb\x3f\x70\x32" + "\x86\x17\x5f\x25\xe4\x74\xc6\xd0" + "\x9b\x39\xe6\xe1\x5a\xec\x8f\x40" + "\xce\xcc\x37\x3b\xd8\x72\x1c\x31" + "\x75\xa4\xa6\x89\x8c\xdd\xd6\xd2" + "\x32\x3d\xe8\xc3\x54\xab\x1f\x35" + "\x52\xb4\x94\x81\xb0\x37\x3a\x03" + "\xbb\xb1\x99\x30\xa5\xf8\x21\xcd" + "\x93\x5d\xa7\x13\xed\xc7\x49\x09" + "\x70\xda\x08\x39\xaa\x15\x9e\x45" + "\x35\x2b\x0f\x5c\x8c\x8b\xc9" + "\xa8\xb8\x9f\xfd\x37\x36\x31\x7e" + "\x34\x4f\xc1\xc0\xca\x8a\x22\xfd", + .clen = 735, } }; -static const struct cipher_testvec sm4_cfb_tv_template[] = { - { /* From: draft-ribose-cfrg-sm4-02, paragraph 12.2.4 */ - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", +static const struct aead_testvec sm4_ccm_tv_template[] = { + { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.2 */ + .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" + "\xFE\xDC\xBA\x98\x76\x54\x32\x10", .klen = 16, - .iv = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10" - "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .ctext = "\x69\x3d\x9a\x53\x5b\xad\x5b\xb1" - "\x78\x6f\x53\xd7\x25\x3a\x70\x56" - "\x9e\xd2\x58\xa8\x5a\x04\x67\xcc" - "\x92\xaa\xb3\x93\xdd\x97\x89\x95", - .len = 32, - }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.4, Example 1 */ - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .iv = "\x02\x00\x00\x12\x34\x56\x78\x00" + "\x00\x00\x00\xAB\xCD\x00\x00\x00", + .ptext = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA" + "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB" + "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC" + "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD" + "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE" + "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" + "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE" + "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA", + .plen = 64, + .assoc = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF" + "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF" + "\xAB\xAD\xDA\xD2", + .alen = 20, + .ctext = "\x48\xAF\x93\x50\x1F\xA6\x2A\xDB" + "\xCD\x41\x4C\xCE\x60\x34\xD8\x95" + "\xDD\xA1\xBF\x8F\x13\x2F\x04\x20" + "\x98\x66\x15\x72\xE7\x48\x30\x94" + "\xFD\x12\xE5\x18\xCE\x06\x2C\x98" + "\xAC\xEE\x28\xD9\x5D\xF4\x41\x6B" + "\xED\x31\xA2\xF0\x44\x76\xC1\x8B" + "\xB4\x0C\x84\xA7\x4B\x97\xDC\x5B" + "\x16\x84\x2D\x4F\xA1\x86\xF5\x6A" + "\xB3\x32\x56\x97\x1F\xA1\x10\xF4", + .clen = 80, + }, { /* Generated from AES-CCM test vectors */ + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb" - "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd" - "\xee\xee\xee\xee\xff\xff\xff\xff" - "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", - .ctext = "\xac\x32\x36\xcb\x86\x1d\xd3\x16" - "\xe6\x41\x3b\x4e\x3c\x75\x24\xb7" - "\x69\xd4\xc5\x4e\xd4\x33\xb9\xa0" - "\x34\x60\x09\xbe\xb3\x7b\x2b\x3f", - .len = 32, - }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.4, Example 2 */ - .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10" - "\x01\x23\x45\x67\x89\xab\xcd\xef", + .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", + .alen = 8, + .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e", + 
.plen = 23, + .ctext = "\x7b\xff\x4a\x15\xf5\x73\xce\x82" + "\x6e\xc2\x31\x1d\xe2\x53\x02\xac" + "\xa4\x48\xf9\xe4\xf5\x1f\x81\x70" + "\x18\xbc\xb6\x84\x01\xb8\xae", + .clen = 31, + }, { + .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" + "\x53\x14\x73\x66\x8d\x88\xf6\x80", .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb" - "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd" - "\xee\xee\xee\xee\xff\xff\xff\xff" - "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", - .ctext = "\x5d\xcc\xcd\x25\xa8\x4b\xa1\x65" - "\x60\xd7\xf2\x65\x88\x70\x68\x49" - "\x0d\x9b\x86\xff\x20\xc3\xbf\xe1" - "\x15\xff\xa0\x2c\xa6\x19\x2c\xc5", - .len = 32, + .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d" + "\x50\x20\xda\xe2\x00\x00\x00\x00", + .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" + "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" + "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" + "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", + .alen = 32, + .ctext = "\x23\x58\xce\xdc\x40\xb1\xcd\x92" + "\x47\x96\x59\xfc\x8a\x26\x4f\xcf", + .clen = 16, + }, { + .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" + "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", + .klen = 16, + .iv = "\x03\xaf\x94\x87\x78\x35\x82\x81" + "\x7f\x88\x94\x68\x00\x00\x00\x00", + .alen = 0, + .ptext = "\x00", + .plen = 0, + .ctext = "\x72\x7e\xf5\xd6\x39\x7a\x2b\x43", + .clen = 8, + }, { + .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" + "\xa4\x48\x93\x39\x26\x71\x4a\xc6", + .klen = 16, + .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" + "\x57\xba\xfd\x9e\x00\x00\x00\x00", + .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" + "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" + "\xa4\xf0\x13\x05\xd1\x77\x99\x67" + "\x11\xc4\xc6\xdb\x00\x56\x36\x61", + .alen = 32, + .ptext = "\x00", + .plen = 0, + .ctext = "\xb0\x9d\xc6\xfb\x7d\xb5\xa1\x0e", + .clen = 8, + }, { + .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" + "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b", + .klen = 16, + .iv = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f" + "\x44\x89\x40\x7b\x00\x00\x00\x00", + .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" + "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" + "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" + "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe", + .alen = 32, + .ptext = "\xc2\x54\xc8\xde\x78\x87\x77\x40" + "\x49\x71\xe4\xb7\xe7\xcb\x76\x61" + "\x0a\x41\xb9\xe9\xc0\x76\x54\xab" + "\x04\x49\x3b\x19\x93\x57\x25\x5d", + .plen = 32, + .ctext = "\xc9\xae\xef\x1d\xf3\x2c\xd3\x38" + "\xc9\x7f\x7e\x28\xe8\xaa\xb3\x60" + "\x49\xdc\x66\xca\x7b\x3d\xe0\x3c" + "\xcb\x45\x9c\x1b\xb2\xbe\x07\x90" + "\x87\xa6\x6b\x89\x0d\x0f\x90\xaa" + "\x7d\xf6\x5a\x9a\x68\x2b\x81\x92", + .clen = 48, + }, { + .key = "\x8b\x32\xcf\xe7\x44\xed\x13\x59" + "\x04\x38\x77\xb0\xb9\xad\xb4\x38", + .klen = 16, + .iv = "\x02\xff\xff\xff\xff\x00\x00\xff" + "\xff\xff\x00\xff\xff\x00\x00\x00", + .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" + "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" + "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" + "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe" + "\xc8\xf3\x5c\x52\x10\x63", + .alen = 38, + .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f" + "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0" + "\x58\x83\xf0\xc3\x70\x14\xc0\x5b" + "\x3f\xec\x1d\x25\x3c\x51\xd2\x03" + "\xcf\x59\x74\x1f\xb2\x85\xb4\x07" + "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb" + "\xaf\x08\x44\xbd\x6f\x91\x15\xe1" + "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50" + "\x59\xa9\x97\xab\xbb\x0e\x74\x5c" + "\x00\xa4\x43\x54\x04\x54\x9b\x3b" + "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08" + "\xae\xe6\x10\x3f\x32\x65\xd1\xfc" + "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3" + "\x35\x23\xf4\x20\x41\xd4\xad\x82" + "\x8b\xa4\xad\x96\x1c\x20\x53\xbe" + "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72" + 
"\xb1\xa9\xb5\x83\xcb\x08\x54\xb7" + "\xad\x49\x3a\xae\x98\xce\xa6\x66" + "\x10\x30\x90\x8c\x55\x83\xd7\x7c" + "\x8b\xe6\x53\xde\xd2\x6e\x18\x21" + "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73" + "\x57\xcc\x89\x09\x75\x9b\x78\x70" + "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5" + "\xfa\x70\x04\x70\xc6\x96\x1c\x7d" + "\x54\x41\x77\xa8\xe3\xb0\x7e\x96" + "\x82\xd9\xec\xa2\x87\x68\x55\xf9" + "\x8f\x9e\x73\x43\x47\x6a\x08\x36" + "\x93\x67\xa8\x2d\xde\xac\x41\xa9" + "\x5c\x4d\x73\x97\x0f\x70\x68\xfa" + "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9" + "\x78\x1f\x51\x07\xe3\x9a\x13\x4e" + "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7" + "\xab\x19\x37\xd9\xba\x76\x5e\xd2" + "\xf2\x53\x15\x17\x4c\x6b\x16\x9f" + "\x02\x66\x49\xca\x7c\x91\x05\xf2" + "\x45\x36\x1e\xf5\x77\xad\x1f\x46" + "\xa8\x13\xfb\x63\xb6\x08\x99\x63" + "\x82\xa2\xed\xb3\xac\xdf\x43\x19" + "\x45\xea\x78\x73\xd9\xb7\x39\x11" + "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81" + "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79" + "\xa4\x47\x7d\x80\x20\x26\xfd\x63" + "\x0a\xc7\x7e\x6d\x75\x47\xff\x76" + "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b" + "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1" + "\x54\x03\xa4\x09\x0c\x37\x7a\x15" + "\x23\x27\x5b\x8b\x4b\xa5\x64\x97" + "\xae\x4a\x50\x73\x1f\x66\x1c\x5c" + "\x03\x25\x3c\x8d\x48\x58\x71\x34" + "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5" + "\xb6\x19\x2b\x84\x2a\x20\xd1\xea" + "\x80\x6f\x96\x0e\x05\x62\xc7\x78" + "\x87\x79\x60\x38\x46\xb4\x25\x57" + "\x6e\x16\x63\xf8\xad\x6e\xd7\x42" + "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a" + "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22" + "\x86\x5c\x74\x3a\xeb\x24\x26\xc7" + "\x09\xfc\x91\x96\x47\x87\x4f\x1a" + "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24" + "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a" + "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5" + "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb" + "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe" + "\x0b\x63\xde\x87\x42\x79\x8a\x68" + "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f" + "\x9d\xd1\xc7\x45\x90\x08\xc9\x83" + "\xe9\x83\x84\xcb\x28\x69\x09\x69" + "\xce\x99\x46\x00\x54\xcb\xd8\x38" + "\xf9\x53\x4a\xbf\x31\xce\x57\x15" + "\x33\xfa\x96\x04\x33\x42\xe3\xc0" + "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6" + "\x19\x95\xd0\x0e\x82\x07\x63\xf9" + "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9" + "\xb5\x9f\x23\x28\x60\xe7\x20\x51" + "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2" + "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb" + "\x78\xc6\x91\x22\x40\x91\x80\xbe" + "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9" + "\x67\x10\xa4\x83\x98\x79\x23\xe7" + "\x92\xda\xa9\x22\x16\xb1\xe7\x78" + "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37" + "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9" + "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d" + "\x48\x11\x06\xbb\x2d\xf2\x63\x88" + "\x3f\x73\x09\xe2\x45\x56\x31\x51" + "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9" + "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66" + "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23" + "\x59\xfa\xfa\xaa\x44\x04\x01\xa7" + "\xa4\x78\xdb\x74\x3d\x8b\xb5", + .plen = 719, + .ctext = "\xc5\x50\x85\x02\x72\xa8\xb3\x62" + "\xf9\xcd\x77\x7b\x43\xa5\x04\x70" + "\x68\x40\x57\x21\x1c\xfe\xef\x05" + "\x4d\xb8\x44\xba\x59\xea\x62\x32" + "\xcb\x6b\x6a\x39\x9b\xf3\xe5\xa4" + "\x36\x38\xde\x7d\xcf\xb6\xcd\xe3" + "\x89\xbf\x37\xc9\x96\x3c\x70\x10" + "\x92\x47\xcc\xac\x6f\xf8\x55\x9a" + "\x26\x43\x34\xb4\x92\x7d\x68\xfc" + "\x60\x37\x74\x2a\x55\xba\xc7\xd7" + "\x98\x69\xb7\xcf\x42\xfd\xb2\x10" + "\xa0\x59\xe1\x2c\x73\x66\x12\x97" + "\x85\x8b\x28\xcc\x29\x02\x15\x89" + "\x23\xd3\x32\x92\x87\x57\x09\x13" + "\x04\x7e\x8b\x6c\x3a\xc1\x4e\x6c" + "\xe1\x9f\xc8\xcc\x47\x9c\xd8\x10" + "\xf4\xb7\x5c\x30\x7a\x8b\x0f\x01" + "\x52\x38\x02\x92\x99\xac\x03\x90" + "\x18\x32\x2d\x21\x6a\x0a\x2a\xe7" + "\xc2\xcc\x15\x84\x4e\x2b\x0b\x3a" + "\x4c\xdc\xb0\x6b\x10\xd1\x27\x10" + 
"\xf0\x4a\x5c\x43\xa0\x34\x34\x59" + "\x47\x43\x48\xcb\x69\xa7\xff\x52" + "\xb8\xca\x23\x09\x07\xd7\xc5\xe4" + "\x2a\x4f\x99\xd5\x83\x36\x2a\x2d" + "\x59\xd0\xca\xb0\xfa\x40\x8c\xab" + "\xdf\x69\x08\xd9\x79\x1d\xde\xa8" + "\x0b\x34\x74\x4d\xf5\xa0\x4c\x81" + "\x7f\x93\x06\x40\x24\xfe\x7d\xcd" + "\xe4\xfe\xf8\xf8\x30\xce\xd0\x5d" + "\x70\xfd\x0d\x5a\x78\x85\x74\x2d" + "\xe4\xb5\x40\x18\x99\x11\xe4\x6a" + "\xdf\xfa\x4f\x25\x2c\xde\x15\xb7" + "\x12\xd8\xc6\x90\x0d\x0f\xc9\xfb" + "\x21\xf1\xed\xfe\x98\xe1\x03\xe2" + "\x5c\xef\xb6\xc7\x87\x77\x0e\xcd" + "\xff\x78\x94\xc9\xbe\xd3\x47\xf7" + "\x8d\x37\x48\x01\x42\xe2\x17\x96" + "\xfc\xc0\xcb\x7b\x7b\x57\xaf\x3b" + "\xc9\xd0\x94\xce\x5e\x1b\xa9\x47" + "\x02\x4d\x74\xcc\x45\x1d\xd3\x2d" + "\x5f\x4f\x7f\xf2\x4b\xf9\x59\xee" + "\x9e\x9e\xb9\x95\x29\x19\xd1\x5f" + "\x72\xab\x8d\xf1\x28\xd1\x1c\xae" + "\xc2\xba\xf7\x22\x84\x2c\x83\x51" + "\x03\xad\xa3\xef\x81\xa7\xdc\xf1" + "\x44\x51\x50\x96\x70\xd1\xe5\x47" + "\x57\xf9\x30\x90\xe4\xbf\xfc\x75" + "\x14\xaa\x4d\xb7\xb1\xe7\x79\x33" + "\x43\xc2\x5c\xc1\xbc\x09\x92\x0f" + "\xa7\xaf\x68\x51\x51\xec\x0b\xc3" + "\x3d\x2b\x94\x30\x45\x29\x1b\x9e" + "\x70\x56\xf8\xd6\x67\x2d\x39\x3b" + "\x3c\xd2\xd0\xd3\xdc\x7d\x84\xe9" + "\x06\x31\x98\xa6\x5c\xbf\x10\x58" + "\xce\xbb\xa7\xe1\x65\x7e\x51\x87" + "\x70\x46\xb4\x7f\xf9\xec\x92\x1c" + "\x9b\x24\x49\xc1\x04\xbe\x1c\x5f" + "\xcc\xb3\x33\x8c\xad\xe7\xdc\x32" + "\x54\xa2\x0d\x83\x0f\x3c\x12\x5d" + "\x71\xe3\x9c\xae\x71\xa3\x2a\x10" + "\xc5\x91\xb4\x73\x96\x60\xdb\x5d" + "\x1f\xd5\x9a\xd2\x69\xc3\xd7\x4b" + "\xa2\x66\x81\x96\x4a\xaa\x02\xd6" + "\xd5\x44\x9b\x42\x3a\x15\x5f\xe7" + "\x4d\x7c\xf6\x71\x4a\xea\xe8\x43" + "\xd7\x68\xe4\xbc\x05\x87\x49\x05" + "\x3b\x47\xb2\x6d\x5f\xd1\x11\xa6" + "\x58\xd4\xa2\x45\xec\xb5\x54\x55" + "\xd3\xd6\xd2\x6a\x8b\x21\x9e\x2c" + "\xf1\x27\x4b\x5b\xe3\xff\xe0\xfd" + "\x4b\xf1\xe7\xe2\x84\xf2\x17\x37" + "\x11\x68\xc4\x92\x4b\x6b\xef\x8e" + "\x75\xf5\xc2\x7d\x5c\xe9\x7c\xfc" + "\x2b\x00\x33\x0e\x7d\x69\xd8\xd4" + "\x9b\xa8\x38\x54\x7e\x6d\x23\x51" + "\x2c\xd6\xc4\x58\x23\x1c\x22\x2a" + "\x59\xc5\x9b\xec\x9d\xbf\x03\x0f" + "\xb3\xdd\xba\x02\x22\xa0\x34\x37" + "\x19\x56\xc2\x5b\x32\x1d\x1e\x66" + "\x68\xf4\x47\x05\x04\x18\xa7\x28" + "\x80\xf2\xc7\x99\xed\x1e\x72\x48" + "\x8f\x97\x5d\xb3\x74\x42\xfd\x0c" + "\x0f\x5f\x29\x0c\xf1\x35\x22\x90" + "\xd6\x7c\xb8\xa3\x2a\x89\x38\x71" + "\xe9\x7a\x55\x3c\x3b\xf2\x6e\x1a" + "\x22\x8f\x07\x81\xc1\xe1\xf1\x76" + "\x2a\x75\xab\x86\xc4\xcc\x52\x59" + "\x83\x19\x5e\xb3\x53\xe2\x81\xdf" + "\xe6\x15\xb3\xba\x0c\x0e\xba" + "\xa9\x2c\xed\x51\xd5\x06\xc8\xc6" + "\x4b\x9f\x5d\x1b\x61\x31\xad\xf4", + .clen = 735, + } +}; + +static const struct hash_testvec sm4_cbcmac_tv_template[] = { + { + .key = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88" + "\x77\x66\x55\x44\x33\x22\x11\x00", + .plaintext = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .digest = "\x97\xb4\x75\x8f\x84\x92\x3d\x3f" + "\x86\x81\x0e\x0e\xea\x14\x6d\x73", + .psize = 16, + .ksize = 16, + }, { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xBA\x98\x76\x54\x32\x10", + .plaintext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb" + "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xee", + .digest = "\xc7\xdb\x17\x71\xa1\x5c\x0d\x22" + "\xa3\x39\x3a\x31\x88\x91\x49\xa1", + .psize = 33, + .ksize = 16, + }, { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xBA\x98\x76\x54\x32\x10", + .plaintext = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16" + "\xf9\xdd\xbe\x91\x73\x53\x37\x1a" + 
"\xfe\xdd\xba\x97\x7e\x53\x3c\x1c" + "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11" + "\xf0\xd8\xbc\x96\x73\x5c\x34\x11" + "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f" + "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17" + "\xfd\xdb\xb1\x9b\x76\x5c\x37", + .digest = "\x9b\x07\x88\x7f\xd5\x95\x23\x12" + "\x64\x0a\x66\x7f\x4e\x25\xca\xd0", + .psize = 63, + .ksize = 16, + } +}; + +static const struct hash_testvec sm4_cmac128_tv_template[] = { + { + .key = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88" + "\x77\x66\x55\x44\x33\x22\x11\x00", + .plaintext = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .digest = "\x00\xd4\x63\xb4\x9a\xf3\x52\xe2" + "\x74\xa9\x00\x55\x13\x54\x2a\xd1", + .psize = 16, + .ksize = 16, + }, { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xBA\x98\x76\x54\x32\x10", + .plaintext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb" + "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xee", + .digest = "\x8a\x8a\xe9\xc0\xc8\x97\x0e\x85" + "\x21\x57\x02\x10\x1a\xbf\x9c\xc6", + .psize = 33, + .ksize = 16, + }, { + .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xfe\xdc\xBA\x98\x76\x54\x32\x10", + .plaintext = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16" + "\xf9\xdd\xbe\x91\x73\x53\x37\x1a" + "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c" + "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11" + "\xf0\xd8\xbc\x96\x73\x5c\x34\x11" + "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f" + "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17" + "\xfd\xdb\xb1\x9b\x76\x5c\x37", + .digest = "\x5f\x14\xc9\xa9\x20\xb2\xb4\xf0" + "\x76\xe0\xd8\xd6\xdc\x4f\xe1\xbc", + .psize = 63, + .ksize = 16, + } +}; + +static const struct hash_testvec sm4_xcbc128_tv_template[] = { + { /* Generated from AES-XCBC128 test vectors */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = zeroed_string, + .digest = "\xa9\x9a\x5c\x44\xe2\x34\xee\x2c" + "\x9b\xe4\x9d\xca\x64\xb0\xa5\xc4", + .psize = 0, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02", + .digest = "\x17\x27\x62\xf3\x8b\x88\x1d\xc0" + "\x97\x35\x9c\x3e\x9f\x27\xb7\x83", + .psize = 3, + .ksize = 16, + } , { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .digest = "\xda\x45\xd1\xac\xec\x4d\xab\x46" + "\xdd\x59\xe0\x44\xff\x59\xd5\xfc", + .psize = 16, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13", + .digest = "\xbe\x24\x5d\x81\x8c\x8a\x10\xa4" + "\x8e\xc2\x16\xfa\xa4\x83\xc9\x2a", + .psize = 20, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .digest = "\x91\x82\x31\x56\xd5\x77\xa4\xc5" + "\x88\x2d\xce\x3a\x87\x5e\xbd\xba", + .psize = 32, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21", + .digest = "\x2a\xae\xa5\x24\x0c\x12\x9f\x5f" + "\x55\xfb\xae\x35\x13\x0d\x22\x2d", + .psize = 34, + .ksize = 16, } }; @@ -14465,104 +15934,6 @@ static 
const struct cipher_testvec aes_cbc_tv_template[] = { }, }; -static const struct cipher_testvec aes_cfb_tv_template[] = { - { /* From NIST SP800-38A */ - .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" - "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", - .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" - "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" - "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" - "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" - "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" - "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" - "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" - "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" - "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" - "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f" - "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b" - "\x26\x75\x1f\x67\xa3\xcb\xb1\x40" - "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf" - "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e" - "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6", - .len = 64, - }, { - .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" - "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" - "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", - .klen = 24, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" - "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" - "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" - "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" - "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" - "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" - "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" - "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab" - "\x34\xc2\x59\x09\xc9\x9a\x41\x74" - "\x67\xce\x7f\x7f\x81\x17\x36\x21" - "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a" - "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1" - "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9" - "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0" - "\x42\xae\x8f\xba\x58\x4b\x09\xff", - .len = 64, - }, { - .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" - "\x2b\x73\xae\xf0\x85\x7d\x77\x81" - "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" - "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", - .klen = 32, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" - "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" - "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" - "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" - "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" - "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" - "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" - "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b" - "\x7e\xcd\x84\x86\x98\x5d\x38\x60" - "\x39\xff\xed\x14\x3b\x28\xb1\xc8" - "\x32\x11\x3c\x63\x31\xe5\x40\x7b" - "\xdf\x10\x13\x24\x15\xe5\x4b\x92" - "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9" - "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" - "\x20\x31\x62\x3d\x55\xb1\xe4\x71", - .len = 64, - }, { /* > 16 bytes, not a multiple of 16 bytes */ - .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" - "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", - .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" - "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" - "\xae", - .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" - "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" - "\xc8", - .len = 17, - }, { /* < 16 bytes */ - .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" - "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", - .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", - .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", - .len = 7, - }, -}; - static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef 
__LITTLE_ENDIAN @@ -18253,55 +19624,6 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = { }, }; -static const struct cipher_testvec aes_ofb_tv_template[] = { - { /* From NIST Special Publication 800-38A, Appendix F.5 */ - .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" - "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", - .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" - "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" - "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" - "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" - "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" - "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" - "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" - "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" - "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" - "\x77\x89\x50\x8d\x16\x91\x8f\x03" - "\xf5\x3c\x52\xda\xc5\x4e\xd8\x25" - "\x97\x40\x05\x1e\x9c\x5f\xec\xf6" - "\x43\x44\xf7\xa8\x22\x60\xed\xcc" - "\x30\x4c\x65\x28\xf6\x59\xc7\x78" - "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e", - .len = 64, - }, { /* > 16 bytes, not a multiple of 16 bytes */ - .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" - "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", - .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" - "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" - "\xae", - .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" - "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" - "\x77", - .len = 17, - }, { /* < 16 bytes */ - .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" - "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", - .klen = 16, - .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", - .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", - .len = 7, - } -}; - static const struct aead_testvec aes_gcm_tv_template[] = { { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ .key = zeroed_string, @@ -21163,136 +22485,6 @@ static const struct aead_testvec aegis128_tv_template[] = { }; /* - * All key wrapping test vectors taken from - * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip - * - * Note: as documented in keywrap.c, the ivout for encryption is the first - * semiblock of the ciphertext from the test vector. For decryption, iv is - * the first semiblock of the ciphertext. - */ -static const struct cipher_testvec aes_kw_tv_template[] = { - { - .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2" - "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6", - .klen = 16, - .ptext = "\x42\x13\x6d\x3c\x38\x4a\x3e\xea" - "\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f", - .ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3" - "\xf5\x6f\xab\xea\x25\x48\xf5\xfb", - .len = 16, - .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d", - .generates_iv = true, - }, { - .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b" - "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71" - "\x03\x86\xf9\x32\x78\x6e\xf7\x96" - "\x76\xfa\xfb\x90\xb8\x26\x3c\x5f", - .klen = 32, - .ptext = "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa" - "\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa", - .ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15" - "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43", - .len = 16, - .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1", - .generates_iv = true, - }, -}; - -/* - * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode) - * test vectors, taken from Appendix B.2.9 and B.2.10: - * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf - * Only AES-128 is supported at this time.
- */ -static const struct cprng_testvec ansi_cprng_aes_tv_template[] = { - { - .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" - "\xed\x06\x1c\xab\xb8\xd4\x62\x02", - .klen = 16, - .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" - "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9", - .dtlen = 16, - .v = "\x80\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .vlen = 16, - .result = "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55" - "\x84\x79\x66\x85\xc1\x2f\x76\x41", - .rlen = 16, - .loops = 1, - }, { - .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" - "\xed\x06\x1c\xab\xb8\xd4\x62\x02", - .klen = 16, - .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" - "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfa", - .dtlen = 16, - .v = "\xc0\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .vlen = 16, - .result = "\x7c\x22\x2c\xf4\xca\x8f\xa2\x4c" - "\x1c\x9c\xb6\x41\xa9\xf3\x22\x0d", - .rlen = 16, - .loops = 1, - }, { - .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" - "\xed\x06\x1c\xab\xb8\xd4\x62\x02", - .klen = 16, - .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" - "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfb", - .dtlen = 16, - .v = "\xe0\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .vlen = 16, - .result = "\x8a\xaa\x00\x39\x66\x67\x5b\xe5" - "\x29\x14\x28\x81\xa9\x4d\x4e\xc7", - .rlen = 16, - .loops = 1, - }, { - .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" - "\xed\x06\x1c\xab\xb8\xd4\x62\x02", - .klen = 16, - .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" - "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfc", - .dtlen = 16, - .v = "\xf0\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .vlen = 16, - .result = "\x88\xdd\xa4\x56\x30\x24\x23\xe5" - "\xf6\x9d\xa5\x7e\x7b\x95\xc7\x3a", - .rlen = 16, - .loops = 1, - }, { - .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" - "\xed\x06\x1c\xab\xb8\xd4\x62\x02", - .klen = 16, - .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" - "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfd", - .dtlen = 16, - .v = "\xf8\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .vlen = 16, - .result = "\x05\x25\x92\x46\x61\x79\xd2\xcb" - "\x78\xc4\x0b\x14\x0a\x5a\x9a\xc8", - .rlen = 16, - .loops = 1, - }, { /* Monte Carlo Test */ - .key = "\x9f\x5b\x51\x20\x0b\xf3\x34\xb5" - "\xd8\x2b\xe8\xc3\x72\x55\xc8\x48", - .klen = 16, - .dt = "\x63\x76\xbb\xe5\x29\x02\xba\x3b" - "\x67\xc9\x25\xfa\x70\x1f\x11\xac", - .dtlen = 16, - .v = "\x57\x2c\x8e\x76\x87\x26\x47\x97" - "\x7e\x74\xfb\xdd\xc4\x95\x01\xd1", - .vlen = 16, - .result = "\x48\xe9\xbd\x0d\x06\xee\x18\xfb" - "\xe4\x57\x90\xd5\xc3\xfc\x9b\x73", - .rlen = 16, - .loops = 10000, - }, -}; - -/* * SP800-90A DRBG Test vectors from * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip * @@ -22986,6 +24178,53 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = { /* * CAMELLIA test vectors. 
*/ +static const struct hash_testvec camellia_cmac128_tv_template[] = { + { /* From draft-kato-ipsec-camellia-cmac96and128-01 */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = zeroed_string, + .digest = "\xba\x92\x57\x82\xaa\xa1\xf5\xd9" + "\xa0\x0f\x89\x64\x80\x94\xfc\x71", + .psize = 0, + .ksize = 16, + }, { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a", + .digest = "\x6d\x96\x28\x54\xa3\xb9\xfd\xa5" + "\x6d\x7d\x45\xa9\x5e\xe1\x79\x93", + .psize = 16, + .ksize = 16, + }, { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11", + .digest = "\x5c\x18\xd1\x19\xcc\xd6\x76\x61" + "\x44\xac\x18\x66\x13\x1d\x9f\x22", + .psize = 40, + .ksize = 16, + }, { + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .digest = "\xc2\x69\x9a\x6e\xba\x55\xce\x9d" + "\x93\x9a\x8a\x4e\x19\x46\x6e\xe9", + .psize = 64, + .ksize = 16, + } +}; static const struct cipher_testvec camellia_tv_template[] = { { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" @@ -24858,6 +26097,1963 @@ static const struct cipher_testvec seed_tv_template[] = { } }; +/* + * ARIA test vectors + */ +static const struct cipher_testvec aria_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .ptext = "\x00\x11\x22\x33\x44\x55\x66\x77" + "\x88\x99\xaa\xbb\xcc\xdd\xee\xff", + .ctext = "\xd7\x18\xfb\xd6\xab\x64\x4c\x73" + "\x9d\xa9\x5f\x3b\xe6\x45\x17\x78", + .len = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .ptext = "\x00\x11\x22\x33\x44\x55\x66\x77" + "\x88\x99\xaa\xbb\xcc\xdd\xee\xff", + .ctext = "\x26\x44\x9c\x18\x05\xdb\xe7\xaa" + "\x25\xa4\x68\xce\x26\x3a\x9e\x79", + .len = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .ptext = "\x00\x11\x22\x33\x44\x55\x66\x77" + "\x88\x99\xaa\xbb\xcc\xdd\xee\xff", + .ctext = "\xf9\x2b\xd7\xc7\x9f\xb7\x2e\x2f" + "\x2b\x8f\x80\xc1\x97\x2d\x24\xfc", + .len = 16, + } +}; + +static const struct cipher_testvec aria_cbc_tv_template[] = { + { + .key = "\x7c\x95\x0d\x07\xe6\x14\x98\x92" + "\x07\xac\x22\x41\x4d\x23\x27\x37", + .klen = 16, + .iv = "\x9d\xd5\x62\xce\x3d\x07\xd9\x89" + "\xf2\x78\x19\x4b\x65\x39\xc3\xc6", + .ptext = "\xcb\xbf\x47\x35\xc5\x37\xf0\x4e" + "\x85\x19\x21\x72\x33\x00\xde\x28", + .ctext = "\xf4\x80\x89\x89\x4a\x37\xda\x98" + "\x80\x52\x74\x75\xd9\xef\x58\xff", + .len = 16, + }, { + .key = "\x8f\xb9\x8d\xc9\xd7\x99\xfe\x7d" + "\xeb\x14\xaa\x65\xaf\x8c\x38\x1a", + .klen = 16, + .iv = "\xb1\x67\x46\x57\x0c\x64\x65\xf2" + "\x8c\x2f\x65\x11\x12\x33\xd4\x9a", + .ptext = "\x3a\xaf\xc1\xeb\x3c\x0c\xc5\xcc" + "\x10\x6e\x45\xa1\xd6\x89\xf1\xe5" + 
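The three aria_tv_template entries above appear to match the RFC 5794 appendix examples, one per key size; ARIA keeps the 16-byte block and scales its round count with the key length. A trivial standalone mapping mirroring what a setkey routine has to validate (illustration only, not the kernel's ARIA code):

#include <stdio.h>

/* RFC 5794: 12, 14 or 16 rounds for 128-, 192- and 256-bit keys. */
static int aria_rounds(unsigned int klen)
{
	switch (klen) {
	case 16: return 12;
	case 24: return 14;
	case 32: return 16;
	default: return -1;   /* invalid key length */
	}
}

int main(void)
{
	for (unsigned int klen = 16; klen <= 32; klen += 8)
		printf("klen %u -> %d rounds\n", klen, aria_rounds(klen));
	return 0;
}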
"\x74\xb6\x90\xd3\x81\x45\x00\x66" + "\x62\x15\x78\x84\xb2\x63\x11\x76", + .ctext = "\x3d\x7d\x3a\xeb\x23\x85\x3e\x72" + "\x12\x45\xbb\x5b\x42\x99\xec\xa0" + "\xa2\xbe\x75\xd6\xb1\xd8\xea\x6f" + "\x97\xfe\xfd\xcc\xfc\x08\x38\x00", + .len = 32, + }, { + .key = "\xe8\xe0\x85\x9c\x33\x06\x36\x5f" + "\xa9\xab\x72\x66\xa1\xd7\xf5\x0d", + .klen = 16, + .iv = "\x5d\xd3\xaf\x13\xed\x82\xc8\x92" + "\x4f\xf4\xe2\x35\xdb\x39\x9e\xa5", + .ptext = "\xdf\x73\x61\x44\x86\x2f\x58\x1e" + "\xfe\xf6\xb9\x1d\xd9\x1e\x4c\x7c" + "\xb4\xe6\x2b\x7d\x17\xc3\xc6\x5f" + "\x9d\xf4\x29\x8a\x55\x5c\x82\x0e" + "\x67\x91\xdd\x4b\xfb\x31\x33\xf1" + "\x56\x75\xa3\x2c\x46\x08\xff\x18", + .ctext = "\x85\x07\x8c\x88\x70\x7b\x39\xb8" + "\xfd\x1d\xa1\xd0\x89\x5f\x3f\x85" + "\x18\x5a\xde\x64\xbd\x54\xd5\x67" + "\xd1\x27\x4c\x98\x82\x76\xea\x22" + "\x52\x98\x79\xb4\x1d\xe8\x16\xd0" + "\xc6\xea\xf7\xbb\x38\x89\xf2\x5d", + .len = 48, + }, { + .key = "\xc1\x19\x8a\x7b\xc9\xaf\x00\xb3" + "\x92\x3c\xd7\xed\xe7\x76\xc5\x98", + .klen = 16, + .iv = "\xca\x62\x82\x1a\x5b\xb1\xcf\xc1" + "\xfb\x50\xb7\xfc\xb0\x3b\x15\xcb", + .ptext = "\xcb\x92\x56\x74\xc9\xee\x80\x78" + "\x78\xf5\x73\xc5\x5b\x2c\x70\x2d" + "\x4e\x0d\xd7\x17\x6d\x5a\x35\x74" + "\x33\xb0\x7d\xf5\xdf\x5f\x96\x7b" + "\x1c\x79\x16\xd0\xe0\x29\x4e\x94" + "\x95\x46\x86\x7a\x77\x28\x89\xb4" + "\x3d\xbb\x65\xab\xfb\xd1\x6c\xf4" + "\x47\xbd\x7e\x7f\x9b\x1d\x8b\x12", + .ctext = "\x69\xd2\x56\xdf\xa8\x1a\x97\xbd" + "\x69\xb5\xbb\x6b\x29\x1d\x5f\x0f" + "\xdf\x5f\x63\xc0\x83\x0b\xd7\xb1" + "\x31\x2d\xbf\x73\xe1\xe5\x5d\x0e" + "\x0c\x8d\xc4\x8a\xa9\xbd\x5f\xc7" + "\xb5\x61\xa0\x2b\x90\x64\x1a\xde" + "\xd2\xe1\x61\xb9\xce\xf4\x0b\x1c" + "\x9c\x43\x69\x6d\xb2\x32\x98\x44", + .len = 64, + }, { + .key = "\xfa\xf7\x53\xf6\xd6\x08\x70\xf1" + "\x32\x58\x97\x74\x04\x12\x1b\x14", + .klen = 16, + .iv = "\xdd\x93\xb2\x3e\xcb\xc1\x7c\x27" + "\x7f\x9e\x41\x03\xab\x1d\xfb\x77", + .ptext = "\xae\x34\x94\x50\x73\x32\xf0\x75" + "\x96\x53\x2e\x1a\xc9\x91\x2b\x37" + "\x77\xbe\x48\x39\xa7\xd0\x6e\xf7" + "\x22\x7c\x4f\xe7\xd8\x06\xee\x92" + "\x80\x57\x61\x45\x7f\x50\xd5\x0a" + "\x0b\x5e\xd4\xd6\x90\x4e\xc3\x04" + "\x52\x63\xaf\x02\x55\xa6\x49\x4b" + "\x7a\x7e\x2e\x95\xea\x80\x6c\x4b" + "\xb7\x88\x42\x3d\xc1\x09\x28\x97" + "\xd7\xa1\x0f\x0f\x1f\xf1\xea\x63", + .ctext = "\x6b\x83\x00\xf1\x79\xb2\x23\xbf" + "\x17\x26\x8a\xef\xd3\xe1\x0e\x82" + "\x5b\xc7\xde\x3e\x39\x72\x2d\xb0" + "\xad\x25\x3b\xe6\x3b\x9f\xe9\x4b" + "\x6e\xe8\x77\xf5\x9d\x7d\x00\xae" + "\x73\x7b\x81\xff\xe3\x55\x8e\x90" + "\xdf\xe4\xcd\xd5\xdc\x16\x8b\x7a" + "\xe5\x04\x92\x18\xff\xcc\x63\x1b" + "\x53\xf3\x26\x44\x5c\x48\x1d\xa2" + "\x1f\x3f\xe0\x8b\x8f\x6f\xc2\x38", + .len = 80, + }, { + .key = "\xb8\xab\x6d\x03\x9d\xec\x15\x0a" + "\xcd\xcd\x68\x73\xa9\x35\x7e\x8a", + .klen = 16, + .iv = "\x9d\xf1\xc0\xa0\x02\x06\xf0\x03" + "\x43\x45\x6a\x2e\x3f\x21\xa9\x3c", + .ptext = "\xef\xbe\x0c\xa3\x49\x4a\xda\x1e" + "\x64\x90\x85\xeb\xdc\xca\x2b\x37" + "\x78\xb7\x62\xd7\x0a\xee\x35\x38" + "\x97\x72\x6a\x99\xb8\x86\x07\x77" + "\x40\xc3\x14\x49\x1f\x67\xa1\x6e" + "\x87\xf0\x0b\x64\x4d\xea\x7c\x3a" + "\x91\x05\xb1\x48\xa1\x6a\x00\x1d" + "\x1b\x4f\x99\xb9\x52\xc9\x0c\xfd" + "\xf3\xe2\x0b\x5f\xe9\xec\x71\xe2" + "\x7d\x15\x84\x46\xc2\x3b\x77\x7b" + "\x30\x01\x34\x5c\x8f\x22\x58\x9a" + "\x17\x05\x7e\xf6\xd5\x92\xc0\xb4", + .ctext = "\x79\x50\x9b\x34\xd7\x22\x9a\x72" + "\x61\xd7\xd8\xa9\xdb\xcf\x2f\xb0" + "\x81\x11\xe3\xed\xa0\xe4\xbd\x8d" + "\xe6\xf2\x52\x52\x40\xec\x9f\x3b" + "\xd4\x48\xc6\xdf\xfd\x36\x90\x8a" + "\x2f\x3b\xb0\xfb\xf4\x2b\x99\xa5" + 
"\xb2\x39\xc7\x52\x57\x2b\xbc\xd7" + "\x3f\x06\x10\x15\x2e\xf7\xaa\x79" + "\xd6\x6a\xe5\x4e\x2d\x0f\x5f\xaf" + "\xf9\x5a\x63\x28\x33\xf0\x85\x8a" + "\x06\x45\xce\x73\xaa\x96\x1d\xcc" + "\x6e\xb9\x25\xb8\x4c\xfe\xeb\x64", + .len = 96, + }, { + .key = "\x50\x45\x7b\x4c\x6d\x80\x53\x62" + "\x90\x26\x77\xf8\x04\x65\x26\xe3", + .klen = 16, + .iv = "\x9d\xd3\x73\x7b\x9b\xbd\x45\x97" + "\xd2\xbb\xa1\xb9\x08\x88\x2c\x85", + .ptext = "\x9f\x11\xeb\x78\x74\xcc\x4e\xd6" + "\x06\x4b\x6d\xe4\xdb\x11\x91\x58" + "\x1f\xa4\xf6\x0e\x8f\xe4\xcf\xfc" + "\x95\x9a\x8b\x68\xb4\x54\x57\x58" + "\x27\x71\xe4\x4b\xc5\x78\x6a\x26" + "\x28\xae\xed\x71\x0e\xe7\xbf\xc3" + "\xff\x9c\x46\x7b\x31\x3e\xff\xb1" + "\xa8\xca\xc3\x6d\xa1\x9e\x49\x16" + "\x31\x8b\xed\x2d\x2a\x2b\xaf\x3b" + "\x3e\x74\x7f\x07\x67\x8e\xb8\x0d" + "\x86\xe2\xea\x2c\x4a\x74\xdc\x9f" + "\x53\x72\xd1\x2e\x97\x0d\x0b\xa5" + "\x05\x87\x8e\x86\x69\x8d\x26\xfb" + "\x90\xc8\xab\x0e\xac\xaf\x84\x1c", + .ctext = "\x3c\x91\xab\x71\xe4\x77\x3e\xb0" + "\x7f\x20\x2e\xd0\xe1\xbe\xfd\x3c" + "\x06\x6c\x36\x75\x46\x27\xfd\x2d" + "\xba\x0f\xf0\x3c\x6d\x1e\x4b\x20" + "\xe9\x5e\x30\xd8\x03\xc6\xa0\x86" + "\xa8\xc7\xa4\x7f\x0e\x1f\x35\x55" + "\x24\x53\x02\xd5\x77\x30\x73\xdc" + "\xa5\xaf\x19\x92\x5b\x36\x86\x0e" + "\xcf\xf2\x5c\x00\xde\x92\xbf\x89" + "\x76\x46\xd5\x26\xb1\x8d\xa4\xef" + "\x61\x7e\x78\xb4\x68\xf5\x5b\x1d" + "\x39\x65\x32\x3a\xad\xff\x8b\x37" + "\x60\xc2\x8a\xaf\x48\x96\x8b\x9f" + "\x12\x6c\x70\x77\x95\xf3\x58\xb0", + .len = 112, + }, { + .key = "\xf9\x9f\x6a\x87\xa1\x2d\x6e\xac" + "\xde\xbb\x3e\x15\x5e\x49\xa4\xef", + .klen = 16, + .iv = "\xeb\x8e\x4f\xbe\x4b\x47\xd6\x4f" + "\x65\xd0\xfa\xee\xa6\xf1\x2c\xda", + .ptext = "\xa3\xfa\x4f\xf6\x00\x12\xbe\xc1" + "\x90\xcc\x91\x88\xbd\xfb\x1c\xdb" + "\x2b\xc8\xb9\x3d\x98\x01\xc8\x1f" + "\x07\xb4\xf3\x10\x1d\xfd\xb7\x2e" + "\xcb\x1c\x1f\xe0\x2d\xca\xd3\xc7" + "\xb2\xce\x52\xf1\x7e\xcb\x7c\x50" + "\x0c\x5c\x53\x6b\x18\x62\x02\x54" + "\xbc\x9d\x1f\xda\xd9\x7a\x2d\xff" + "\xb8\x2c\x65\xad\xf1\xfe\xb6\xa4" + "\x8c\xe8\x0a\xb7\x67\x60\xcb\x38" + "\xd7\x72\xa5\xb1\x92\x13\x8e\xd4" + "\xcd\xb3\x04\xb5\xa1\x11\x96\x37" + "\xb3\x53\xa6\xc4\x14\x56\x6d\x42" + "\x66\x43\x40\x42\x41\x63\x11\x7a" + "\xd5\x34\x38\x75\xd0\xbc\x74\x89" + "\x82\x1d\x2c\x0a\x3e\x6a\xfb\xbd", + .ctext = "\x09\x58\xf3\x22\xe5\x10\xf6\x3d" + "\xba\xb1\xfa\x5a\x16\xfe\xc5\x32" + "\x3d\x34\x59\x2e\x81\xde\x99\x2f" + "\xeb\x6a\x97\x86\x1f\x47\x8d\xe6" + "\x87\x79\x0e\xfe\xa4\xca\x09\xdc" + "\x24\x9b\xbb\xb1\x90\x33\xce\xd7" + "\x62\xfd\xfd\xa3\x65\x50\x07\x7c" + "\x4c\xa2\x10\xc7\x32\x0a\x0d\x5e" + "\x22\x29\x40\x71\xe5\xcc\x3a\x5b" + "\x5b\x53\x51\xa5\x5b\xc1\x76\x05" + "\x84\x6e\xe3\x58\x2b\xf2\x28\x76" + "\x5c\x66\x90\xfe\x63\x30\x1c\x45" + "\x26\x34\x80\xfe\x76\x87\x5b\xb1" + "\x63\x10\x09\xf6\x9d\x35\xcb\xee" + "\x3c\x60\x9d\x77\x5b\x36\x70\x09" + "\x4b\x63\x63\x90\x97\x3a\x6c\x8a", + .len = 128, + }, { + .key = "\x04\xb9\x6c\x8f\x5e\x79\x02\x87" + "\x88\x06\x7c\xfa\xd3\x7b\x56\xfe", + .klen = 16, + .iv = "\x4b\xc8\x93\x20\x98\x04\xba\x5a" + "\x22\x04\x1f\x3f\x79\x2c\x63\x79", + .ptext = "\xf3\x85\x3e\x75\x97\x10\x7c\x5d" + "\x39\x5a\x46\x47\xe7\x51\xa3\xac" + "\x84\x56\x3f\x1b\xb3\x93\x6a\x2e" + "\xf7\x8f\x63\xbe\x18\xff\xd7\x53" + "\xc8\xe0\xa5\xde\x86\xc2\xe4\xab" + "\xc3\x67\x27\x91\x43\x8c\xff\x6c" + "\xc7\x07\xc2\xcd\xe9\x12\x8b\xef" + "\x47\xe7\x82\xed\xe3\x8d\x5e\x33" + "\xca\xf1\x28\x32\xf4\x38\x41\x59" + "\x6c\x54\xa6\x40\xb0\xd5\x73\x26" + "\x5b\x02\xa6\x9d\x01\x29\x26\x84" + "\x5b\x33\x04\x36\xa4\x7b\x00\x01" + 
"\x42\xe1\x4f\xda\xa9\x1a\x9b\x4e" + "\x7d\x4a\x4c\xbc\xf6\xd4\x06\xc2" + "\x89\x70\x72\xf5\xc5\x7f\x42\xd5" + "\x7b\x9c\x6f\x00\x21\x74\xc5\xa5" + "\x78\xd7\xa2\x3c\x6d\x0f\xfb\x74" + "\x3d\x70\x9f\x6d\xdd\x30\xc0\x28", + .ctext = "\xc0\x49\x98\xb9\xf6\x58\xeb\x56" + "\x36\x76\x7a\x40\x7c\x27\x80\x62" + "\xe3\xcb\x9c\x87\x2c\x03\xc2\x0c" + "\x82\x00\x50\xd2\xe4\x61\x4d\x54" + "\x88\x10\x6f\x0a\xb4\x25\x57\xba" + "\xf0\x07\xe3\x55\x06\xb3\x72\xe9" + "\x2f\x9f\x1e\x50\xa8\x15\x69\x71" + "\xe3\xe5\x50\x32\xe5\xe0\x47\x0f" + "\x3a\xaa\x7d\xc0\x09\x0e\xdb\x1a" + "\xae\xb6\xa5\x87\x63\xd6\xbe\x8b" + "\xb2\x3d\x10\x1e\xb3\x68\xcf\x8a" + "\xe5\xa8\x89\xa9\xfe\x79\x13\x77" + "\xc4\x3f\x6f\x9f\xdd\x76\x5b\xf2" + "\x05\x67\x8a\x58\xb4\x31\xac\x64" + "\x6f\xc4\xc1\x6b\x08\x79\x3f\xe5" + "\x1c\x9a\x66\x3f\x7d\x1f\x18\xb1" + "\x07\xa5\x7b\x4f\x2c\x43\x33\x84" + "\xab\x1b\xc0\x7d\x49\x2f\x27\x9b", + .len = 144, + }, { + .key = "\x99\x79\xaf\x3c\xfb\xbd\xe7\xca" + "\xee\x4a\x4d\xb2\x23\x1e\xb6\x07", + .klen = 16, + .iv = "\xb4\xfc\xaa\xc1\x08\xbf\x68\xb2" + "\xf6\xef\x29\xbc\x2d\x92\xa9\x40", + .ptext = "\xd3\x44\xe4\xd9\x6c\x8a\x1d\x4b" + "\xfe\x64\x25\xb6\x72\x21\xda\x10" + "\x3e\x77\xee\xd1\x41\xd3\xea\xf0" + "\xee\xee\x72\x0f\xad\xa1\xca\xf3" + "\x7e\xfa\x99\x36\xe0\x8f\xed\x40" + "\xf1\x12\x80\x73\xd6\x26\x3a\xa6" + "\x5d\x71\xf6\xd5\xe1\xf3\x89\x16" + "\x6f\x96\x00\xcf\x26\x06\x2a\x27" + "\xe4\xc2\x57\xba\x1f\x74\x5e\x91" + "\x10\x7e\xe5\x51\x17\xd5\xdc\xb2" + "\x5b\x12\x4b\x33\xb1\xc6\x4e\x0d" + "\xbf\x0e\x5d\x65\x61\x68\xd1\xc5" + "\x4b\xc5\xa4\xcd\xf0\xe0\x79\x26" + "\xa3\xcd\xdc\xb8\xfc\xd5\xca\x1d" + "\x7e\x81\x74\x55\x76\xf5\x40\xbb" + "\x26\x7f\x11\x37\x23\x70\xc8\xb6" + "\xfc\x2b\x0b\xd7\x1c\x7b\x45\xe7" + "\xf2\x2a\xed\x10\x4f\xcf\x0c\xcd" + "\x0f\xe7\xf9\xa1\xfb\x27\x67\x09" + "\xee\x11\xa2\xaf\x37\xc6\x16\xe0", + .ctext = "\x60\xce\x9a\xdb\xb2\xe8\xa2\x64" + "\x35\x9c\x5b\x97\x21\x9b\x95\x89" + "\x7b\x89\x15\x01\x97\x8b\xec\x9b" + "\xb9\xce\x7d\xb9\x9d\xcc\xd0\xa0" + "\xda\x39\x5d\xfd\xb9\x51\xe7\x2f" + "\xe7\x9b\x73\x1b\x07\xfb\xfd\xbb" + "\xce\x84\x68\x76\x12\xc9\x6c\x38" + "\xc0\xdc\x67\x96\x5e\x63\xcf\xe5" + "\x57\x84\x7a\x14\x8c\xab\x38\x94" + "\x1c\x27\xc3\xe0\x03\x58\xfe\x98" + "\x97\xfc\x96\xba\x65\x87\x1e\x44" + "\xf8\x00\x91\x6a\x14\x05\xf3\xf9" + "\x8e\x3e\x7a\x3c\x41\x96\x15\x4f" + "\xa8\xc0\x73\x1f\x1b\xeb\xaf\xec" + "\xc4\x5a\x35\xed\x42\x2f\x47\xea" + "\xfd\x2f\x29\xf6\x0f\x58\x8b\x3d" + "\x15\x81\xe3\xa4\xa6\x5f\x33\x33" + "\xe9\x0d\x06\x4f\x7f\x89\x2c\x3d" + "\x18\x45\x1f\xd1\xc5\x74\xf7\x52" + "\x2f\x9b\x72\x3d\x1f\xad\x12\x1b", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0", + .klen = 24, + .iv = "\xfd\xab\x56\xa6\x6e\xda\x7c\x57" + "\x36\x36\x89\x09\xcd\xa8\xd3\x91", + .ptext = "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0" + "\x51\xe3\x8c\xe9\x76\xcd\xff\x37", + .ctext = "\x2d\x8f\x39\x71\x0a\x2c\xc9\x93" + "\xb6\x1a\x5c\x53\x06\x4d\xaa\xcf", + .len = 16, + }, { + .key = "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe" + "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .klen = 24, + .iv = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .ptext = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1" + "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .ctext = "\xc1\x53\x86\xf8\x60\x5d\x72\x59" + "\x7e\xdf\xc8\xdb\x85\xd6\x9f\x2a" + "\xa1\xda\xe5\x85\x78\x4f\x1b\x6f" + "\x58\xf3\x2b\xff\x34\xe4\x97\x4e", + .len 
= 32, + }, { + .key = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4" + "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0", + .klen = 24, + .iv = "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2", + .ptext = "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e" + "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86", + .ctext = "\x25\x5f\x66\x15\xb5\x62\xfb\x55" + "\xb3\x77\xa1\x7d\x03\xba\x86\x0a" + "\x0d\x5b\xbb\x06\xe9\xe2\xa8\x41" + "\xa3\x58\xd6\x4b\xcb\x7f\xd0\x15" + "\x3b\x02\x74\x5d\x4c\x4c\xb0\xa5" + "\x06\xc9\x59\x53\x2a\x36\xeb\x59", + .len = 48, + }, { + .key = "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .klen = 24, + .iv = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40", + .ptext = "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa" + "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ctext = "\x91\x02\xa9\xd3\x4b\x9a\x8f\xe6" + "\x9f\xe4\x51\x57\xc9\x42\xda\x68" + "\xca\xf6\x54\x51\x90\xec\x20\x2e" + "\xab\x25\x6c\xd9\x8b\x99\xa6\x1c" + "\x72\xc9\x01\xd6\xbc\x2b\x26\x78" + "\x42\x00\x84\x0a\xdd\xa8\xd9\xb5" + "\xc6\xc8\x30\xb6\xab\xea\x71\x84" + "\xb2\x57\x97\x32\xdb\x35\x23\xd8", + .len = 64, + }, { + .key = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d", + .klen = 24, + .iv = "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38", + .ptext = "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8" + "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41", + .ctext = "\x28\x23\x3a\x4a\x18\xb7\xb6\x05" + "\xd4\x1b\x6a\x9e\xa7\xf2\x38\x01" + "\x78\xd3\xb0\x1b\x95\x68\x59\xf1" + "\xc0\xed\x30\x46\x2e\xb9\xa6\xdc" + "\xde\xef\xa6\x85\x19\xfc\x4d\x36" + "\x5d\x24\x92\x62\x75\x32\x76\x6d" + "\x6d\xa9\x07\xe1\x4f\x59\x84\x1a" + "\x68\x9a\x07\x48\xd3\x86\xf6\xf1" + "\x5b\xf9\x35\xec\x7c\xaf\x47\x13" + "\x9c\xc9\x33\x12\x10\x2f\x94\x8a", + .len = 80, + }, { + .key = "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50", + .klen = 24, + .iv = "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ptext = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda" + "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e", + .ctext = "\x38\x5b\x16\xef\xb8\x8c\x74\x7a" + "\x55\x17\x71\xa7\x7d\x34\xd7\x6a" + "\xc6\x31\x55\x6f\xbb\x61\xf4\x12" + "\x81\x8c\x91\x0d\x10\xdb\xd5\x22" + "\x77\x36\x32\xb6\x77\xb1\x5e\x21" + "\xb5\xec\xf9\x64\x04\x90\x6f\xc6" + "\x8a\x86\x23\xb5\xfe\xa4\xb6\x84" + "\x91\xa1\x60\xe3\xd7\xf3\xb9\xda" + 
"\x96\x23\x4a\xb3\xab\x75\x84\x04" + "\x15\x1a\xbb\xe8\x02\x1e\x80\x7c" + "\xc1\x93\x01\x0f\x5c\x4a\xde\x85" + "\xbb\x93\x05\x66\x53\x74\x40\x56", + .len = 96, + }, { + .key = "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb", + .klen = 24, + .iv = "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17", + .ptext = "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95" + "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41", + .ctext = "\x4b\x56\xe0\xc2\x65\x2f\x7c\x6f" + "\xee\x22\xeb\x34\x1c\xa5\xb7\xc8" + "\x35\xd7\x51\xfd\x6a\xf4\xdd\xc3" + "\x38\xf4\xfc\x9d\x2e\xc2\x77\xb7" + "\x93\x8e\x8c\xb3\x44\x9b\xaf\xbb" + "\x99\xb9\xa8\x38\x1c\xfe\x63\xfb" + "\x1f\xa0\xaa\x35\x29\x7b\x87\x49" + "\x8e\x93\xa5\xb8\x5a\x85\x37\xa7" + "\x67\x69\x49\xbd\xc3\xfa\x89\x1c" + "\xf5\x60\x9b\xe7\x71\x96\x95\xd9" + "\x0b\x98\xe6\x74\x1d\xa3\xd9\x89" + "\x03\xe4\xf6\x66\xb3\x73\xb1\xac" + "\x9f\xee\x8f\xc2\x96\xcc\x97\x78" + "\x1b\x96\x63\x64\x00\x9c\x2d\x29", + .len = 112, + }, { + .key = "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3", + .klen = 24, + .iv = "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe", + .ptext = "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c" + "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a", + .ctext = "\x4d\x35\x70\xf1\x25\x02\x1d\x7f" + "\x9e\x0f\x5b\x4b\x65\xab\xcc\x6b" + "\x62\xab\x2b\xfa\xc0\x66\xee\x56" + "\xb4\x66\x95\x22\x84\x39\xd8\x3f" + "\x74\xba\x4f\x3f\xcd\xef\xcf\xf6" + "\x76\xeb\x9e\x8a\xec\x9c\x31\xa0" + "\x3e\x0c\xf9\xfa\x57\x90\xb4\x02" + "\xac\xc8\x28\xda\xa0\x05\xb7\x7e" + "\x75\x9c\x79\x36\xa9\x2f\x1a\x36" + "\x56\x77\xda\x74\xc7\xb3\xdf\xf3" + "\xb9\x83\x10\xf3\x6b\xe1\xdf\xcb" + "\x11\x70\xb1\xa0\x68\x48\x26\x95" + "\x10\x91\x94\xf3\xe9\x82\xb4\x8a" + "\xaa\xde\xf8\x9f\xce\x82\x47\x18" + "\x37\x5d\xda\x34\x74\x4d\x36\xbd" + "\xa5\x6c\xa4\xb3\x70\xad\x00\xbd", + .len = 128, + }, { + .key = "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8", + .klen = 24, + .iv = "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e", + .ptext = "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46" + "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + 
"\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c", + .ctext = "\xa1\x4a\x83\xb2\xe0\xef\x3d\x94" + "\xa4\x34\x66\x93\xb4\x89\x4e\x12" + "\xe5\x61\xc9\xea\xe0\x16\x96\x1a" + "\x3e\x94\x20\x81\xd4\x12\x7f\xf4" + "\xb8\x3f\xc9\xe2\x99\xb5\x0f\x9e" + "\x71\x86\x4f\x13\x78\x4e\xf1\x51" + "\xd4\x7d\x6e\x47\x31\x9a\xd8\xf7" + "\xb9\xb1\x17\xd0\xbd\xbf\x72\x86" + "\xb4\x58\x85\xf0\x05\x67\xc4\x00" + "\xca\xcb\xa7\x1a\x1d\x88\x29\xf4" + "\xe2\xf6\xdd\x5a\x3e\x5a\xbb\x29" + "\x48\x5a\x4a\x18\xcd\x5c\xf1\x09" + "\x5b\xbe\x1a\x43\x12\xc5\x6e\x6e" + "\x5e\x6d\x3b\x22\xf7\x58\xbd\xc8" + "\xb1\x04\xaf\x44\x9c\x2b\x98\x5a" + "\x14\xb7\x35\xb8\x9a\xce\x32\x28" + "\x1f\x8d\x08\x8a\xb9\x82\xf0\xa5" + "\x6a\x37\x29\xb6\x29\x3a\x53\x5e", + .len = 144, + }, { + .key = "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57", + .klen = 24, + .iv = "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97", + .ptext = "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66" + "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c", + .ctext = "\xd9\xed\xc8\xc7\x66\xcd\x06\xc5" + "\xc1\x25\x9b\xf5\x14\x71\x1d\x69" + "\xc9\x7c\x04\x40\xab\xc0\x44\xf4" + "\xa1\xe6\x57\x8b\x35\x62\x4e\x3f" + "\xce\x4a\x99\xcd\x95\xc4\xd1\xf3" + "\xbc\x25\xa2\x18\xe6\xd1\xf7\xc0" + "\x13\x98\x60\x4c\x5c\xb1\x4f\x7a" + "\xbc\x45\x12\x52\xe8\x71\xb0\xf1" + "\x18\xef\x6f\x8a\x63\x35\x17\xae" + "\x90\x31\x41\x9d\xf4\xdc\x35\xcc" + "\x49\x72\x10\x11\x3b\xe3\x40\x7a" + "\x8e\x21\x39\xd0\x5b\x82\xb1\xe9" + "\x0c\x37\x5a\x7c\x11\xcb\x96\xd9" + "\xd4\x1c\x47\x4b\x70\xcb\xca\x08" + "\x5f\x71\xe9\x48\xf6\x29\xd8\xbb" + "\x5c\xad\x9b\x23\x9f\x62\xaf\xef" + "\x8e\xd8\x99\x1d\x60\xad\xc3\x6f" + "\xed\x06\x1a\xec\xfa\xc0\x0f\x0d" + "\xb7\x00\x02\x45\x7c\x94\x23\xb6" + "\xd7\x26\x6a\x16\x62\xc4\xd9\xee", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0" + "\xfd\xab\x56\xa6\x6e\xda\x7c\x57", + .klen = 32, + .iv = "\x36\x36\x89\x09\xcd\xa8\xd3\x91" + "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0", + .ptext = "\x51\xe3\x8c\xe9\x76\xcd\xff\x37" + "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe", + .ctext = "\x05\x31\x46\x6d\xb8\xf4\x92\x64" + "\x46\xfd\x0d\x96\x60\x01\xd7\x94", + .len = 16, + }, { + .key = "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22" + "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .klen = 32, + .iv = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1", + .ptext = "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d" + "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4", + .ctext = 
"\x24\x36\xe4\x14\xb7\xe1\x56\x8a" + "\xf3\xc5\xaf\x0e\xa7\xeb\xbd\xcd" + "\x2d\xe9\xd7\x19\xae\x24\x5d\x3b" + "\x1d\xfb\xdc\x21\xb3\x1a\x37\x0b", + .len = 32, + }, { + .key = "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0" + "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2" + "\x39\x56\x34\x63\x2c\xc5\x51\x13", + .klen = 32, + .iv = "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e", + .ptext = "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86" + "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .ctext = "\x2e\x73\x60\xec\xd3\x95\x78\xe8" + "\x0f\x98\x1a\xc2\x92\x49\x0b\x49" + "\x71\x42\xf4\xb0\xaa\x8b\xf8\x53" + "\x16\xab\x6d\x74\xc0\xda\xab\xcd" + "\x85\x52\x11\x20\x2c\x59\x16\x00" + "\x26\x47\x4a\xea\x08\x5f\x38\x68", + .len = 48, + }, { + .key = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40" + "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c", + .klen = 32, + .iv = "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa", + .ptext = "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44" + "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d" + "\xb2\x92\x83\x70\x1e\xa3\x97\xa6", + .ctext = "\xfb\xd3\xc3\x8b\xf7\x89\xcc\x31" + "\xb1\x7f\xc3\x91\xdc\x04\xc6\xd7" + "\x33\xbd\xe0\xee\x0c\xd5\x70\xed" + "\x1b\x1d\xad\x49\x6f\x5c\xa1\x68" + "\xd7\x03\xc9\x65\xa7\x90\x30\x2b" + "\x26\xeb\xf4\x7a\xac\xcc\x03\xe1" + "\x6a\xe5\xdb\x23\x10\x8a\xcd\x70" + "\x39\x4d\x7a\xc9\xcd\x62\xd1\x65", + .len = 64, + }, { + .key = "\x65\x53\x39\xeb\x53\x8f\xb1\x38" + "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8", + .klen = 32, + .iv = "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8", + .ptext = "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41" + "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50" + "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ctext = "\xa2\x51\x28\xc2\x5e\x58\x1c\xaf" + "\x84\x92\x1c\xe1\x92\xf0\xf9\x9e" + "\xf2\xb3\xc6\x2b\x34\xd2\x8d\xa0" + "\xb3\xd7\x87\x56\xeb\xd9\x32\x6a" + "\xca\x90\x28\x26\x49\x34\xca\x41" + "\xce\xc5\x9e\xd6\xfe\x57\x71\x3c" + "\x98\xaf\xdd\xfc\x7d\xdf\x26\x7e" + "\xb7\x9c\xd5\x15\xe5\x81\x7a\x4f" + "\x4f\x4f\xe5\x77\xf2\x2e\x67\x68" + "\x52\xc1\xac\x28\x2c\x88\xf4\x38", + .len = 80, + }, { + .key = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95", + .klen = 32, + .iv = "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda", + .ptext = "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e" + "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb" + "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17" + "\x58\x2b\x1d\x73\x9a\x9c\x63\x18", + .ctext = 
"\xd1\xce\xbe\xe0\x4a\x6e\x6d\x7f" + "\x89\x19\x28\xb1\xca\xe8\xc1\x9c" + "\x8c\x0b\x7d\x63\xfe\xff\x3d\xf4" + "\x65\x9e\xd6\xe7\x2f\x5a\xc1\x31" + "\x1e\xe7\x59\x27\x54\x92\xcc\xaa" + "\x5b\x3d\xeb\xe7\x96\xc1\x49\x54" + "\x18\xf3\x14\xaa\x56\x03\x28\x53" + "\xaa\x0a\x91\xdf\x92\x96\x9b\x06" + "\x1a\x24\x02\x09\xe7\xa6\xdc\x75" + "\xeb\x00\x1d\xf5\xf2\xa7\x4a\x9d" + "\x75\x80\xb7\x47\x63\xfc\xad\x18" + "\x85\x5f\xfc\x64\x03\x72\x38\xe7", + .len = 96, + }, { + .key = "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0", + .klen = 32, + .iv = "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95", + .ptext = "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41" + "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3" + "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe" + "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c", + .ctext = "\x0b\x07\xdc\x6a\x47\x45\xd2\xb0" + "\xa3\xf2\x42\x2f\xa4\x79\x6b\x4c" + "\x53\x9c\x8a\x2f\x48\x9c\xf2\x89" + "\x73\x8b\xdd\x97\xde\x41\x06\xc8" + "\x8a\x30\x7a\xa9\x90\x4a\x43\xd0" + "\xd5\xee\x16\x51\x44\xda\xe4\xb8" + "\xe8\x5f\x6f\xef\x84\xf3\x44\x43" + "\xbd\xdc\xc3\xdf\x65\x2b\xaf\xf6" + "\xfe\xd0\x4a\x5b\x30\x47\x8c\xaf" + "\x8d\xed\x2d\x91\xa1\x03\x9a\x80" + "\x58\xdd\xaa\x8f\x3b\x6b\x39\x10" + "\xe5\x92\xbc\xac\xaa\x25\xa1\x13" + "\x7e\xaa\x03\x83\x05\x83\x11\xfe" + "\x19\x5f\x04\x01\x48\x00\x3b\x58", + .len = 112, + }, { + .key = "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2", + .klen = 32, + .iv = "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c", + .ptext = "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a" + "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8" + "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e" + "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a", + .ctext = "\xfe\xba\x8f\x68\x47\x55\xaa\x61" + "\x48\xdd\xf3\x7c\xc4\xdc\xa6\x93" + "\x4e\x72\x3f\xc7\xd0\x2b\x9b\xac" + "\xc1\xb5\x95\xf8\x8e\x75\x62\x0c" + "\x05\x6a\x90\x76\x35\xed\x73\xf2" + "\x0f\x44\x3d\xaf\xd4\x00\xeb\x1d" + "\xad\x27\xf2\x2f\x55\x65\x91\x0f" + "\xe4\x04\x9c\xfb\x8a\x18\x22\x8e" + "\x21\xbe\x93\x09\xdd\x3e\x93\x34" + "\x60\x82\xcd\xff\x42\x10\xed\x43" + "\x3a\x4b\xb8\x5c\x6c\xa8\x9e\x1c" + "\x95\x6a\x17\xa7\xa3\xe0\x7d\xdb" + "\x6e\xca\xaf\xc1\x1f\xb2\x86\x15" + "\xf0\xc1\x55\x72\xf2\x74\x44\xeb" + "\x09\x09\x83\x8b\x2c\xc9\x63\x13" + "\x99\xe3\xe1\x4b\x5c\xf7\xb1\x04", + .len = 128, + }, { + .key = "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62", + .klen = 32, + .iv = "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46", + .ptext = 
"\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c" + "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57" + "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97" + "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01", + .ctext = "\xa5\x19\x33\xad\x2d\x1a\x7b\x34" + "\xb0\x21\x68\x0e\x20\x11\x7a\x37" + "\xef\x35\x33\x64\x31\x0a\x42\x77" + "\x2c\x7f\x1a\x34\xd6\x93\x2d\xe9" + "\x26\xb9\x15\xec\x4f\x83\xbd\x48" + "\x5b\xe9\x63\xea\x10\x3b\xec\xfb" + "\xb0\x5e\x81\x90\xf0\x07\x43\xc4" + "\xda\x54\x69\x98\x13\x5d\x93\x16" + "\xca\x06\x81\x64\x36\xbe\x36\xa2" + "\xd4\xd8\x48\x63\xc7\x53\x39\x93" + "\x6d\x6b\xd6\x49\x00\x72\x5e\x02" + "\xc7\x88\x61\x0f\x10\x88\xd4\x9e" + "\x17\x81\xa4\xdc\x43\x4e\x83\x43" + "\xd4\xc3\xd7\x25\x9a\xd4\x76\xde" + "\x88\xe3\x98\x5a\x0e\x80\x23\xfb" + "\x49\xb3\x83\xf6\xb9\x16\x00\x06" + "\xa5\x06\x24\x17\x65\xbb\x68\xa9" + "\x56\x6d\xeb\xcd\x3c\x14\xd2\x64", + .len = 144, + }, { + .key = "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53", + .klen = 32, + .iv = "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66", + .ptext = "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c" + "\xcd\x4b\xbf\x0f\xcb\x81\xd4\xec" + "\x1e\x07\x05\x4d\x5c\x6b\xba\xcc" + "\x43\xc7\xb1\xfe\xa8\xe9\x96\xb0" + "\xb1\xb2\xd4\x70\x44\xbc\xaa\x50" + "\xbf\x3f\x81\xe6\xea\x36\x7d\x97" + "\x2a\xbd\x52\x16\xf7\xbe\x59\x27" + "\x8f\xcc\xe3\xa9\xec\x4f\xcd\xd3" + "\xf4\xe2\x54\xbe\xf1\xf9\x2b\x23" + "\x40\xc7\xcb\x67\x4d\x5f\x0b\xd4" + "\xbf\x19\xf0\x2a\xef\x37\xc6\x56", + .ctext = "\x0a\x69\xd8\x67\x33\x2a\x2f\xa9" + "\x26\x79\x65\xd6\x75\x1e\x98\xe8" + "\x52\x56\x32\xbf\x67\x71\xf4\x01" + "\xb1\x6f\xef\xf9\xc9\xad\xb3\x49" + "\x7a\x4f\x24\x9a\xae\x06\x62\x26" + "\x3e\xe4\xa7\x6f\x5a\xbf\xe9\x52" + "\x13\x01\x74\x8b\x6e\xb1\x65\x24" + "\xaa\x8d\xbb\x54\x21\x20\x60\xa4" + "\xb7\xa5\xf9\x4e\x7b\xf5\x0b\x70" + "\xd2\xb9\xdc\x9b\xdb\x2c\xb2\x43" + "\xf7\x71\x30\xa5\x13\x6f\x16\x75" + "\xd0\xdf\x72\xae\xe4\xed\xc1\xa3" + "\x81\xe0\xd5\xc0\x0e\x62\xe8\xe5" + "\x86\x2c\x37\xde\xf8\xb0\x21\xe4" + "\xcd\xa6\x76\x9b\xa1\x56\xd3\x67" + "\x70\x69\xd6\x5d\xc7\x65\x19\x59" + "\x43\x9c\xca\x32\xe9\xd1\x48\x92" + "\x71\x79\x87\x73\x24\xcb\xc0\x0f" + "\x23\x3b\x8f\x51\x8a\xb3\x3a\x9c" + "\x74\xa4\x19\xa7\xe4\x4f\x6b\x32", + .len = 160, + } +}; + +static const struct cipher_testvec aria_ctr_tv_template[] = { + { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1", + .klen = 16, + .iv = "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0" + "\xfd\xab\x56\xa6\x6e\xda\x7c\x57", + .ptext = "\x36\x36\x89\x09\xcd\xa8\xd3\x91" + "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0", + .ctext = "\x19\x28\xb5\xf2\x1c\xbc\xf8\xaf" + "\xb9\xae\x1b\x23\x4f\xe1\x6e\x40", + 
.len = 16, + }, { + .key = "\x51\xe3\x8c\xe9\x76\xcd\xff\x37" + "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe", + .klen = 16, + .iv = "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .ptext = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e" + "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1", + .ctext = "\x3f\x8c\xa9\x19\xd6\xb4\xfb\xed" + "\x9c\x6d\xaa\x1b\xe1\xc1\xe6\xa8" + "\xa9\x0a\x63\xd3\xa2\x1e\x6b\xa8" + "\x52\x97\x1e\x81\x34\x6f\x98\x0e", + .len = 32, + }, { + .key = "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .klen = 16, + .iv = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4", + .ptext = "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0" + "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2" + "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e", + .ctext = "\x28\xd8\xa7\xf8\x74\x98\x00\xfc" + "\xd6\x48\xad\xbd\xbe\x3f\x0e\x7b" + "\x3d\x46\xfd\xde\x3e\x4f\x12\x43" + "\xac\x85\xda\xff\x70\x24\x44\x9d" + "\x1e\xf8\x9f\x30\xba\xca\xe0\x97" + "\x03\x6d\xe1\x1d\xc7\x21\x79\x37", + .len = 48, + }, { + .key = "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17", + .klen = 16, + .iv = "\xd9\xa0\x57\x80\xc8\x96\x70\x86" + "\x07\x2c\xf4\x61\x79\x09\x01\x8f", + .ptext = "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57" + "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40" + "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa", + .ctext = "\x29\x31\x55\xd2\xe5\x0b\x81\x39" + "\xf9\xbc\x63\xe2\xfa\x26\x99\xde" + "\xde\x18\x93\x68\x81\x7b\x0a\x4d" + "\xf6\x03\xe1\xee\xf9\x0e\x1f\xe8" + "\xa8\x80\x81\x46\xdc\x24\x43\x3f" + "\xff\xfe\x8c\x3e\x17\x0a\x6d\xa2" + "\x47\x55\x62\xa0\x03\x4e\x48\x67" + "\xa2\x64\xc0\x9b\x6c\xa4\xfd\x6a", + .len = 64, + }, { + .key = "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac", + .klen = 16, + .iv = "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ptext = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d" + "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38" + "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8", + .ctext = "\x38\xbc\xf5\x9d\x0e\x26\xa6\x18" + "\x95\x0b\x23\x54\x09\xa1\xf9\x46" + "\x12\xf1\x42\x57\xa1\xaa\x52\xfa" + "\x8a\xbd\xf2\x03\x63\x4e\xbc\xf7" + "\x21\xea\xed\xca\xdd\x42\x41\x94" + "\xe4\x6c\x07\x06\x19\x59\x30\xff" + "\x8c\x9d\x51\xbf\x2c\x2e\x5b\xa5" + "\x7d\x11\xec\x6b\x21\x08\x12\x18" + "\xe4\xdf\x5a\xfd\xa6\x5f\xee\x2f" + "\x5c\x24\xb7\xea\xc1\xcd\x6d\x68", + .len = 80, + }, { + .key = "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3", + .klen = 16, + .iv = "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a", + .ptext = "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41" + "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50" + "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2" + "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + 
"\x5d\xcb\x2b\x7e\xdd\xab\xac\xda", + .ctext = "\xdf\x79\x58\x30\x6f\x47\x12\x78" + "\x04\xb2\x0b\x1a\x62\x22\xe2\x9f" + "\xfe\xc2\xf5\x6d\x9e\x0e\x2e\x56" + "\x76\x01\x7f\x25\x8f\x6e\xc5\xf3" + "\x91\xff\xcd\x67\xc6\xae\x0b\x01" + "\x4d\x5f\x40\x25\x88\xc5\xe0\x3d" + "\x37\x62\x12\x58\xfe\xc5\x4a\x21" + "\x4a\x86\x8d\x94\xdd\xfd\xe6\xf6" + "\x1e\xa6\x78\x4f\x90\x66\xda\xe4" + "\x4e\x64\xa8\x05\xc6\xd8\x7d\xfb" + "\xac\xc9\x1d\x14\xb5\xb0\xfa\x9c" + "\xe8\x84\xef\x87\xbe\xb4\x2a\x87", + .len = 96, + }, { + .key = "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1", + .klen = 16, + .iv = "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad", + .ptext = "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e" + "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb" + "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17" + "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95", + .ctext = "\xe4\x25\x0d\x22\xeb\xbe\x5e\x90" + "\x01\xe5\xae\xc9\x94\xbd\x93\x89" + "\x5f\x98\xf1\x46\x6a\x50\x3b\xa2" + "\x79\xd9\xe4\x9c\x9a\xde\xf2\x8c" + "\x25\x49\x4c\xda\xb4\x2c\x76\xab" + "\x0a\xa8\x51\xaf\xc0\x62\x1b\xe9" + "\xe9\x7a\x35\x6a\x4b\x1f\x48\x00" + "\xeb\x24\x1d\x5e\xdd\x06\x09\x23" + "\x2a\xfa\x8f\x3b\x3e\x9e\x14\x6f" + "\x2a\x3c\xef\x6d\x73\x67\xdd\x6c" + "\xc8\xa5\x57\xc8\x02\xb6\x9a\xe8" + "\x8d\xcf\x10\xfa\x3e\x9c\x4d\xeb" + "\x44\xd2\x05\x31\x40\x94\x77\x87" + "\xf0\x83\xb5\xd2\x2a\x9c\xbc\xe4", + .len = 112, + }, { + .key = "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15", + .klen = 16, + .iv = "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12", + .ptext = "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41" + "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3" + "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe" + "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c", + .ctext = "\xa7\x4c\x96\x55\x7c\x07\xce\xb2" + "\x6f\x63\x9f\xc6\x8b\x6f\xc6\x4a" + "\x2c\x47\x8d\x99\xdf\x65\x75\x96" + "\xb7\x1d\x50\x5b\x57\x4a\x69\xcc" + "\xc9\x3a\x18\x8a\xd1\xab\x70\x4a" + "\xa3\x13\x80\xdd\x48\xc0\x6a\x7d" + "\x21\xa8\x22\x06\x32\x47\xc0\x16" + "\x1f\x9a\xc0\x21\x33\x66\xf2\xd8" + "\x69\x79\xae\x02\x82\x3f\xaf\xa6" + "\x98\xdb\xcd\x2a\xe5\x12\x39\x80" + "\x8a\xc1\x73\x99\xe5\xe4\x17\xe3" + "\x56\xc2\x43\xa6\x41\x6b\xb2\xa4" + "\x9f\x81\xc4\xe9\xf4\x29\x65\x50" + "\x69\x81\x80\x4b\x86\xab\x5e\x30" + "\xd0\x81\x9d\x6f\x24\x59\x42\xc7" + "\x6d\x5e\x41\xb8\xf5\x99\xc2\xae", + .len = 128, + }, { + .key = "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c", + .klen = 16, + .iv = "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33", + .ptext = "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a" + "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + 
"\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8" + "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e" + "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46", + .ctext = "\x11\x7f\xea\x49\xaf\x24\x52\xa2" + "\xde\x60\x99\x58\x23\xf9\x9e\x91" + "\x73\xd5\x9a\xcb\xdd\x10\xcd\x68" + "\xb8\x9e\xef\xa4\xe9\x2d\xf0\x27" + "\x44\xd4\x9a\xd6\xb6\x9c\x7a\xec" + "\x17\x17\xea\xa7\x8e\xa8\x40\x6b" + "\x43\x3d\x50\x59\x0f\x74\x1b\x9e" + "\x03\xed\x4f\x2f\xb8\xda\xef\xc3" + "\x3f\x29\xb3\xf4\x5c\xcd\xce\x3c" + "\xba\xfb\xc6\xd1\x1d\x6f\x61\x3a" + "\x2b\xbd\xde\x30\xc5\x53\xe0\x6e" + "\xbe\xae\x2f\x81\x13\x0f\xd2\xd5" + "\x14\xda\xd3\x60\x9c\xf8\x00\x86" + "\xe9\x97\x3e\x05\xb3\x95\xb3\x21" + "\x1f\x3c\x56\xef\xcb\x32\x49\x5c" + "\x89\xf1\x34\xe4\x8d\x7f\xde\x01" + "\x1f\xd9\x25\x6d\x34\x1d\x6b\x71" + "\xc9\xa9\xd6\x14\x1a\xf1\x44\x59", + .len = 144, + }, { + .key = "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b", + .klen = 16, + .iv = "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12", + .ptext = "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c" + "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57" + "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97" + "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66", + .ctext = "\x5b\xc0\xe8\x17\xa4\xf9\xea\xce" + "\x9e\xf9\xe0\xb1\xac\x37\xe9\x41" + "\x0b\x57\xc6\x55\x54\x50\xfa\xa9" + "\x60\xaf\x7a\x4e\x98\x56\xde\x81" + "\x14\xfc\xac\x21\x81\x3e\xf4\x0f" + "\x40\x92\x30\xa8\x16\x88\x1a\xc3" + "\xf1\x39\xbd\x0a\xb9\x44\xc8\x67" + "\x8c\xaa\x2b\x45\x8b\x5b\x7b\x24" + "\xd5\xd8\x9e\xd3\x59\xa5\xd7\x69" + "\xdf\xf4\x50\xf9\x5f\x4f\x44\x1f" + "\x2c\x75\x68\x6e\x3a\xa8\xae\x4b" + "\x84\xf0\x42\x6c\xc0\x3c\x42\xaf" + "\x87\x2b\x89\xe9\x51\x69\x16\x63" + "\xc5\x62\x13\x05\x4c\xb2\xa9\x69" + "\x01\x14\x73\x88\x8e\x41\x47\xb6" + "\x68\x74\xbc\xe9\xad\xda\x94\xa1" + "\x0c\x12\x8e\xd4\x38\x15\x02\x97" + "\x27\x72\x4d\xdf\x61\xcc\x86\x3d" + "\xd6\x32\x4a\xc3\xa9\x4c\x35\x4f" + "\x5b\x91\x7d\x5c\x79\x59\xb3\xd5", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0", + .klen = 24, + .iv = "\xfd\xab\x56\xa6\x6e\xda\x7c\x57" + "\x36\x36\x89\x09\xcd\xa8\xd3\x91", + .ptext = "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0" + "\x51\xe3\x8c\xe9\x76\xcd\xff\x37", + .ctext = "\xa4\x12\x2f\xc4\xf0\x6d\xd9\x46" + "\xe4\xe6\xd1\x0b\x6d\x14\xf0\x8f", + .len = 16, + }, { + .key = "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe" + "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .klen = 24, + .iv = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .ptext = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + 
"\x0e\x60\x75\x84\x21\xdf\x13\xa1" + "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .ctext = "\x80\x2b\xf0\x88\xb9\x4b\x8d\xf5" + "\xc3\x0e\x15\x5b\xea\x5d\x5b\xa8" + "\x07\x95\x78\x72\xc0\xb9\xbf\x25" + "\x33\x22\xd1\x05\x56\x46\x62\x25", + .len = 32, + }, { + .key = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4" + "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0", + .klen = 24, + .iv = "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2", + .ptext = "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e" + "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86", + .ctext = "\x65\x01\x3c\xb0\xac\x4c\x63\xb6" + "\xe7\xf1\xf4\x61\x35\xf4\x36\xde" + "\x7f\x85\xba\x41\xa8\xb0\x27\x11" + "\x86\x2c\x71\x16\x05\x1d\xcf\x70" + "\x35\xef\x23\x17\xfc\xed\x3f\x1a" + "\x8e\xb3\xe5\xdb\x90\xb4\xb8\x35", + .len = 48, + }, { + .key = "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .klen = 24, + .iv = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40", + .ptext = "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa" + "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ctext = "\x5a\xfb\xb1\x2c\x6e\xe5\xb8\xe0" + "\x80\xb6\x77\xa8\xfe\x10\x3a\x99" + "\x00\x8e\x30\x23\x7d\x50\x87\xda" + "\xc6\x46\x73\x37\x8b\xf1\xab\x26" + "\x2d\xa8\x0c\xa8\x9e\x77\xee\xfc" + "\x78\x4f\x03\x0f\xeb\xc6\x03\x34" + "\xb9\x9c\x4f\x59\x55\xc5\x99\x47" + "\xd4\x7e\xe8\x06\x43\x5f\xa1\x6b", + .len = 64, + }, { + .key = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d", + .klen = 24, + .iv = "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38", + .ptext = "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8" + "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41", + .ctext = "\xc9\x5f\xe0\x60\x61\x38\x7e\x79" + "\x52\x68\x64\x8f\x55\x9b\x6b\x72" + "\xbf\x09\xef\x2f\xb2\x92\xbb\xa3" + "\xe1\x6a\xeb\xe6\x4e\x7c\x5d\xe0" + "\x6a\x4b\xd0\x57\x3b\x28\x8a\x83" + "\x75\xd4\x5a\x2e\xd1\x9a\x57\xe3" + "\xc5\x43\x36\xde\x02\xac\x2c\x75" + "\xea\x33\x3a\x7e\x5d\xb8\xf6\x12" + "\x42\xbd\x06\x8a\x09\x6b\xd6\xb6" + "\x25\x59\xcd\xbd\x17\xeb\x69\xb3", + .len = 80, + }, { + .key = "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50", + .klen = 24, + .iv = "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ptext = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda" + "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e", + .ctext = "\x03\x2c\x39\x24\x99\xb5\xf6\x79" + 
"\x91\x89\xb7\xf8\x89\x68\x37\x9d" + "\xe7\x4d\x7d\x1c\x36\xae\x98\xd2" + "\xbf\x2a\xa4\x30\x38\x30\xe7\x5d" + "\xbb\x00\x09\x40\x34\xa4\xef\x82" + "\x23\xca\x0e\xb3\x71\x80\x29\x0a" + "\xa9\x0b\x26\x65\x9a\x12\xbf\x18" + "\xfb\xf8\xe4\xc2\x62\x57\x18\xfb" + "\x1e\x98\xea\x5b\xf6\xd6\x7c\x52" + "\x7a\xba\x0e\x6a\x54\x19\xb6\xfa" + "\xe5\xd7\x60\x40\xb0\x1a\xf1\x09" + "\x70\x96\x23\x49\x98\xfc\x79\xd2", + .len = 96, + }, { + .key = "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb", + .klen = 24, + .iv = "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17", + .ptext = "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95" + "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41", + .ctext = "\xd4\x9a\x04\x54\x05\xd2\xe6\x3f" + "\xb0\xa4\x36\x5e\x1e\x9c\x35\xb0" + "\xa6\x62\x35\x47\xf4\x4d\x08\x9e" + "\x1c\x22\x91\x8e\x7f\x00\xa6\x3e" + "\x0a\x04\x42\x0f\xc4\xa6\x5d\xe2" + "\x49\x4c\x61\x12\xea\x9d\x7d\x7c" + "\xfa\x93\x74\x6b\x79\x8c\xdb\xc6" + "\x47\xf6\xea\x84\x3e\x97\x7d\x87" + "\x40\x38\x92\xc7\x44\xef\xdf\x63" + "\x29\xe4\x5b\x3a\x87\x22\xa1\x3f" + "\x2b\x31\xb1\xa4\x0d\xea\xf3\x0b" + "\xd7\x4f\xb6\x9c\xba\x40\xa3\x2f" + "\x21\x2b\x05\xe4\xca\xef\x87\x04" + "\xe6\xd0\x29\x2c\x29\x26\x57\xcd", + .len = 112, + }, { + .key = "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3", + .klen = 24, + .iv = "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe", + .ptext = "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c" + "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a", + .ctext = "\xbc\x57\x2a\x88\x0a\xd0\x06\x4f" + "\xdb\x7b\x03\x9f\x97\x1a\x20\xfe" + "\xdb\xdc\x8e\x7b\x68\x13\xc8\xf5" + "\x06\xe3\xe0\x7e\xd3\x51\x21\x86" + "\x4f\x32\xdb\x78\xe3\x26\xbe\x34" + "\x52\x4c\x4e\x6b\x85\x52\x63\x8b" + "\x8c\x5c\x0e\x33\xf5\xa3\x88\x2d" + "\x04\xdc\x01\x2d\xbe\xa1\x48\x6d" + "\x50\xf4\x16\xb1\xd7\x4d\x1e\x99" + "\xa8\x1d\x54\xcb\x13\xf9\x85\x51" + "\x18\x9f\xef\x45\x62\x5d\x48\xe5" + "\x0c\x54\xf7\x7b\x33\x18\xce\xb0" + "\xd5\x82\x1b\xe2\x91\xae\xdc\x09" + "\xe2\x97\xa8\x27\x13\x78\xc6\xb8" + "\x20\x06\x1a\x71\x5a\xb3\xbc\x1b" + "\x69\x1f\xcd\x57\x70\xa7\x1e\x35", + .len = 128, + }, { + .key = "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8", + .klen = 24, + .iv = "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e", + .ptext = "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + 
"\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46" + "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c", + .ctext = "\xd7\xb4\xfc\xcc\x1f\xf7\xfc\x7d" + "\x69\xfa\xcb\x01\x60\xf3\x5a\x14" + "\x88\xf7\xea\x43\xaa\x47\xf1\x8a" + "\x4e\xd0\x3c\x50\x58\x35\x95\x21" + "\x5f\xcc\x73\x0b\x97\xa0\x2c\x6b" + "\x70\x4d\x3d\xa8\x21\xbe\xfc\xec" + "\xb6\x55\xf0\x48\x2b\x11\xcc\x4b" + "\xda\xf7\x09\xd9\x18\x7b\x4f\x00" + "\x76\x40\xe0\x7d\x33\xcf\x4f\x77" + "\x91\x97\x63\xfa\x72\xba\x5c\x3d" + "\xcf\x2e\xb8\x19\x56\x4a\xa5\x02" + "\xc3\xb1\x80\xa8\x57\x03\x32\x57" + "\xa8\xe1\x65\xf7\xd3\x52\xc5\xcf" + "\x55\x1e\x34\xe3\x77\xab\x83\xdb" + "\xaf\xd3\x8a\xcc\x96\x1c\xc9\x73" + "\xd9\x0b\xb6\x4c\x31\xac\x2c\x82" + "\xb8\xb4\xc8\xe1\xa5\x71\xcc\xb3" + "\x7e\x85\xb8\xfa\x6b\xef\x41\x24", + .len = 144, + }, { + .key = "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57", + .klen = 24, + .iv = "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97", + .ptext = "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66" + "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c", + .ctext = "\x71\xf6\x96\x02\x07\x71\x1a\x08" + "\x7c\xfe\x33\xc4\xc9\xbe\xe2\xed" + "\xf8\x46\x69\xce\x1b\xdc\xd3\x05" + "\x7a\xec\x26\x4d\x27\x2a\x49\x36" + "\x85\xe1\x5d\xd3\x91\xd7\x68\xb8" + "\x55\xa5\x27\x55\x2d\xc1\x78\x27" + "\x0c\x49\x0a\x24\x3b\x76\x3f\x5f" + "\x29\x1c\x37\x2f\x30\xfc\x50\xcb" + "\xe2\x54\x26\x7d\x97\xa7\xf3\x58" + "\x15\xe1\x4c\xeb\x35\xc9\xd1\x1e" + "\x7e\x7d\xa0\xe5\x62\xa5\x2d\xf6" + "\x77\xb0\xef\x13\x55\xb4\x66\x2c" + "\x3b\x50\x1b\x4d\xc2\x64\xce\xc6" + "\xfe\xf2\xad\xfe\x26\x73\x36\x66" + "\x0c\x2f\x10\x35\x97\x3c\x9c\x98" + "\xc1\x90\xa8\x82\xd7\xc6\x31\x68" + "\xcf\x77\xa8\x5b\xdf\xf9\x5a\x8e" + "\x84\xb5\x0b\x6e\x5b\xec\x36\x89" + "\x0b\xb1\xbf\xb9\x70\x02\x5c\x22" + "\xc3\xd5\xc1\xc6\xfd\x07\xdb\x70", + .len = 160, + }, { + .key = "\x82\x8e\x9e\x06\x7b\xc2\xe9\xb3" + "\x06\xa3\xfa\x99\x42\x67\x87\xac" + "\x21\xc7\xb0\x98\x6c\xf8\x26\x57" + "\x08\xdd\x92\x02\x77\x7b\x35\xe7", + .klen = 32, + .iv = "\xa1\xad\xcb\xdd\xd5\x19\xb6\xd4" + "\x0b\x62\x58\xb0\x6c\xa0\xc1\x58", + .ptext = "\x14\x0d\x8a\x09\x16\x00\x00\xf1" + "\xc0\x20\x86\xf9\x21\xd1\x34\xe2", + .ctext = "\x05\xe3\x34\xaf\x6c\x83\x14\x8b" + "\x9d\x1c\xd6\x87\x74\x91\xdf\x17", + .len = 16, + }, { + .key = "\xc9\xf3\xc4\x93\xd0\xcc\xaf\xb1" + "\x1a\x42\x93\x71\xd8\x4e\xd8\xaa" + "\x52\xad\x93\x2f\xe5\xd9\xaa\x5b" + "\x47\x37\x3a\xed\x13\x92\x35\x16", + .klen = 
32, + .iv = "\x81\xc8\x50\xd1\x74\xc3\x1c\x73" + "\xbb\xab\x72\x83\x90\x5a\x15\xcb", + .ptext = "\x65\x11\x93\xaf\xe1\x69\x6c\xbe" + "\x25\x8c\x76\x87\x53\xa4\x80\xae" + "\x51\x94\x36\x3f\xca\xe7\x45\x41" + "\x76\x05\xbf\x8f\x9c\xad\xc0\xe3", + .ctext = "\x6b\x00\x6e\x49\x7a\x6d\xe3\x04" + "\x4e\xf7\x9f\x8a\x1f\x14\xbd\xb1" + "\x51\xbf\x13\x9f\x29\x95\x51\x16" + "\xd0\x23\x9a\x1a\x45\xc2\xc3\xd1", + .len = 32, + }, { + .key = "\xd5\x9f\x52\x34\x12\x99\x8e\x42" + "\xe0\x85\x04\x6f\xeb\xf1\x5d\xd0" + "\xc1\xbf\x3f\x84\xd9\x1e\x71\x44" + "\xd4\xb9\x40\x3c\x02\x2e\x21\x19", + .klen = 32, + .iv = "\x28\xc1\x97\x64\x81\x52\x57\x0e" + "\x02\x8c\xab\x4c\xe2\x60\x14\xa5", + .ptext = "\x5a\xb1\x33\x48\xaa\x51\xe9\xa4" + "\x5c\x2d\xbe\x33\xcc\xc4\x7f\x96" + "\xe8\xde\x2b\xe7\x35\x7a\x11\x4b" + "\x13\x08\x32\xc6\x41\xd8\xec\x54" + "\xa3\xd3\xda\x35\x43\x69\xf6\x88" + "\x97\xca\x00\x1b\x02\x59\x24\x82", + .ctext = "\x03\xaf\x76\xbd\x5e\x5b\xca\xc0" + "\xae\x44\xa2\x2f\xc2\x76\x2f\x50" + "\xfa\x94\x94\x5a\x48\x9d\x9c\x38" + "\xc9\x75\xc9\xb2\x56\x0a\x2d\x91" + "\xb8\xe8\x4e\xaa\xcb\x51\x9b\x6a" + "\x20\x9b\x2b\xc5\xb0\x18\x9d\x01", + .len = 48, + }, { + .key = "\x9c\x5d\xd7\x66\x36\xfa\x02\x20" + "\x99\x61\x62\x86\x0f\x43\x2e\x05" + "\x25\x8b\xfb\xf1\xae\x4c\xde\x18" + "\x0b\xf8\xd0\x9d\xaa\xd4\x56\x04", + .klen = 32, + .iv = "\xcd\xa8\x61\x89\x8d\xbb\x72\xb6" + "\x1e\xfe\x03\x34\x54\x88\x23\xe2", + .ptext = "\x66\x42\x60\x24\xf3\xe4\xe9\x7e" + "\x42\x20\xf4\x61\xce\x1c\x5e\x44" + "\x02\x26\x91\xf7\x41\xa4\xab\x34" + "\x29\x49\xdd\x78\x19\x8f\x10\x10" + "\xf0\x61\xcf\x77\x18\x17\x61\xdf" + "\xc4\xa8\x35\x0e\x75\x1b\x84\x6b" + "\xc3\x3f\x31\x59\x5a\x9c\xf4\xc3" + "\x43\xa9\xb7\xf8\x65\x40\x40\xba", + .ctext = "\xb6\x41\x55\x8f\xeb\x16\x1e\x4c" + "\x81\xa0\x85\x6c\xf0\x07\xa5\x2a" + "\x19\x91\xed\x3e\xd6\x30\x8c\xca" + "\x5d\x0f\x58\xca\xd2\x8a\xac\xa2" + "\x2b\x86\x4f\xb5\x85\x4d\xac\x6d" + "\xe5\x39\x1b\x02\x23\x89\x4e\x4f" + "\x02\x00\xe8\x1b\x40\x85\x21\x2b" + "\xc6\xb1\x98\xed\x70\xb3\xf8\xc3", + .len = 64, + }, { + .key = "\x4b\x4e\x11\x91\x27\xcf\x8c\x66" + "\x17\xfa\x5b\x4c\xa8\xb8\x0f\xa1" + "\x99\x5b\x07\x56\xe1\x8d\x94\x8b" + "\xf2\x86\x5a\x5f\x40\x83\xfa\x06", + .klen = 32, + .iv = "\xfd\x73\xee\x1c\x27\xf3\xb4\x38" + "\xc5\x7c\x2e\xc5\x6e\xdb\x49\x0d", + .ptext = "\x0a\xe2\xdd\x97\xdd\x5e\xd4\xb3" + "\xc1\x49\x8f\x53\xb2\x40\x85\x1c" + "\x90\x37\x2d\xbd\x21\x6b\x1f\x80" + "\x56\x98\x76\x1e\xcf\x6c\x78\xd8" + "\xa0\x3c\x79\xc3\x56\xf7\xfc\x64" + "\x35\x58\x1c\x7c\xc4\x5f\x2a\x25" + "\x8c\x01\x98\x1e\x1c\x1f\x15\x64" + "\x50\xb5\xfa\x02\xd3\x54\xe5\x29" + "\xe3\xd2\xa3\x83\x54\x40\x54\xc5" + "\xd8\x1c\xc9\x84\x7d\xc8\x31\x49", + .ctext = "\x53\x2a\xa8\xa0\x15\xaf\x2f\xc4" + "\x7d\x31\xb4\x61\x80\x5f\xd1\xb6" + "\x7c\xca\x86\xb9\x28\x6e\xb6\x2b" + "\xe3\x4b\x7e\xea\xb3\x4f\xa2\xa2" + "\x4e\x8f\xbe\x22\x66\xb3\x92\xbc" + "\x70\x91\xaf\xa6\x09\x5d\xe2\x05" + "\x38\x62\xd3\x6e\x07\x63\x91\xad" + "\x48\x5a\x42\xe7\xdc\x0d\xb1\xe3" + "\x92\x88\x64\xee\x93\xaa\xaf\x31" + "\x68\x57\x35\x8d\x54\x2c\xfa\xb1", + .len = 80, + }, { + .key = "\x77\x3b\xf5\xe7\x20\xf7\xe0\x0c" + "\x3d\x3a\x83\x17\x83\x79\xd8\x29" + "\x5a\x0a\x25\x7f\xe0\x21\x23\xff" + "\x31\xfd\x60\x10\xe6\x63\xe2\xaf", + .klen = 32, + .iv = "\xdb\x4c\x0d\xc0\x36\xdb\xc7\xa1" + "\xa4\x91\xd9\x05\xe6\xc4\x98\x00", + .ptext = "\x8d\x4d\xc6\x5e\x01\x82\xb3\x39" + "\xc8\x64\xa7\xcb\x05\x19\x84\x80" + "\x3f\x9c\xa8\x4f\x64\xb3\x11\x4b" + "\x0e\x21\xc4\x75\x04\x1d\x6f\xd5" + "\x04\x04\x4d\xc9\xc0\x4b\x4a\x9c" + "\x26\xb7\x68\x5a\xe4\xd0\x61\xe3" + 
"\x2c\x93\x8e\x3f\xb4\x67\x07\x31" + "\x02\x52\x0c\x0f\xe6\x6d\xa3\xd0" + "\x48\x95\x83\x67\x23\x64\x31\x50" + "\xd2\x5f\x69\x68\x8b\x71\xbf\x01" + "\x29\x99\x86\x36\x2e\xdf\xf1\x7c" + "\x08\x8c\x78\x7a\x93\x9a\x7d\x1b", + .ctext = "\x92\x90\x48\x2f\x3a\x6b\x68\x43" + "\x28\x9b\x7d\x1e\x46\x28\xd8\x58" + "\xd9\x1e\x44\xd7\x24\x91\x65\xb1" + "\x15\xde\xc4\x63\xf1\xb1\x34\x9e" + "\xae\x8c\x51\x94\xc5\x22\x65\x8d" + "\x3d\x85\xf5\x34\x5f\x04\x68\x95" + "\xf2\x66\x62\xbb\xc8\x3f\xe4\x0a" + "\x8a\xb2\x70\xc0\x77\xd5\x96\xef" + "\x9e\x39\x3a\x3e\x0d\x2b\xf9\xfe" + "\xa9\xbc\x00\xba\xc5\x43\xd7\x70" + "\x2f\xef\x1e\x1e\x93\xc2\x5d\xf1" + "\xb5\x50\xb8\xf5\xee\xf4\x26\x6f", + .len = 96, + }, { + .key = "\xe0\x6a\x30\xe1\x35\xb5\xb0\x7c" + "\x54\xc5\x73\x9b\x00\xe5\xe7\x02" + "\xbe\x16\x59\xdc\xd9\x03\x17\x53" + "\xa8\x37\xd1\x5f\x13\x8e\x45\xdb", + .klen = 32, + .iv = "\x54\xe9\x1c\xde\xfb\x26\x0e\x48" + "\x35\x50\x4d\x9b\x4d\x12\x21\x0d", + .ptext = "\x73\x72\xcf\xdb\xbd\xbc\xc0\xdf" + "\x6b\xbb\xdf\x65\x6f\x2f\x43\x3b" + "\x2d\x7c\x0e\x07\x7f\xa0\x95\xdd" + "\xfc\x67\xc1\x11\x7a\xe2\xb5\x4a" + "\xd1\x15\xb0\xd8\xe2\xf0\x35\x48" + "\xd8\x81\x6a\x35\xae\x67\xbf\x61" + "\xf2\x8a\xcf\x04\xc8\x09\x8b\x63" + "\x31\x74\x95\xa5\x8d\x3c\xea\xe2" + "\x5f\x67\xc4\x7e\x51\x88\xbf\xb5" + "\x78\xef\x3a\x76\xd8\x1d\x00\x75" + "\x2b\x7b\x28\x7c\xde\x4b\x39\x01" + "\x5d\xde\x92\xfe\x90\x07\x09\xfd" + "\xa5\xd1\xd3\x72\x11\x6d\xa4\x4e" + "\xd1\x6e\x16\xd1\xf6\x39\x4f\xa0", + .ctext = "\x3b\xc5\xee\xfc\x05\xaf\xa6\xb7" + "\xfe\x12\x24\x79\x31\xad\x32\xb5" + "\xfb\x71\x9b\x02\xad\xf4\x94\x20" + "\x25\x7b\xdb\xdf\x97\x99\xca\xea" + "\xc4\xed\x32\x26\x6b\xc8\xd4\x7b" + "\x5b\x55\xfa\xf9\x5b\xab\x88\xdb" + "\x48\xfe\x67\xd5\x5a\x47\x81\x4e" + "\x3e\x1e\x83\xca\x1d\x04\xe1\xb5" + "\x6c\x1b\xbd\xf2\x2d\xf1\xae\x75" + "\x09\x6a\xf8\xb2\xc3\x27\xee\x08" + "\x66\x94\x72\xc0\x2b\x12\x47\x23" + "\x4d\xde\xb4\xca\xf7\x66\xca\x14" + "\xe7\x68\x1b\xfb\x48\x70\x3e\x4c" + "\x43\xbb\x88\x32\x25\xff\x77\x6a", + .len = 112, + }, { + .key = "\x60\xb6\xde\x17\xca\x4c\xe7\xe0" + "\x07\x0d\x80\xc5\x8a\x2d\x5a\xc2" + "\x2c\xb9\xa4\x5f\x2a\x85\x2c\x3d" + "\x6d\x67\xc8\xee\x0f\xa2\xf4\x09", + .klen = 32, + .iv = "\x1a\xa5\xbc\x7e\x93\xf6\xdd\x28" + "\xb7\x69\x27\xa1\x84\x95\x25\x5a", + .ptext = "\x7b\x88\x00\xeb\xa5\xba\xa1\xa7" + "\xd4\x40\x16\x74\x2b\x42\x37\xda" + "\xe0\xaf\x89\x59\x41\x2f\x62\x00" + "\xf5\x5a\x4e\x3b\x85\x27\xb2\xed" + "\x1b\xa7\xaf\xbe\x89\xf3\x49\xb7" + "\x8c\x63\xc9\x0c\x52\x00\x5f\x38" + "\x3b\x3c\x0c\x4f\xdd\xe1\xbf\x90" + "\x4a\x48\xbf\x3a\x95\xcb\x48\xa2" + "\x92\x7c\x79\x81\xde\x18\x6e\x92" + "\x1f\x36\xa9\x5d\x8d\xc4\xb6\x4d" + "\xb2\xb4\x0e\x09\x6d\xf3\x3d\x01" + "\x3d\x9b\x40\x47\xbc\x69\x31\xa1" + "\x6a\x71\x26\xdc\xac\x10\x56\x63" + "\x15\x23\x7d\x10\xe3\x76\x82\x41" + "\xcd\x80\x57\x2f\xfc\x4d\x22\x7b" + "\x57\xbb\x9a\x0a\x03\xe9\xb3\x13", + .ctext = "\x37\x0d\x47\x21\xbc\x28\x0b\xf7" + "\x85\x5f\x60\x57\xf2\x7f\x92\x20" + "\x5f\xa7\xf6\xf4\xa6\xf5\xdf\x1e" + "\xae\x8e\xeb\x97\xfc\xce\x6a\x25" + "\x6d\x6a\x5b\xd1\x99\xf6\x27\x77" + "\x52\x0c\xf1\xd7\x94\xa0\x67\x5d" + "\x60\x35\xb0\x6d\x01\x45\x52\xc8" + "\x05\xd8\x7f\x69\xaf\x8e\x68\x05" + "\xa8\xa5\x24\x2f\x95\xef\xf1\xd2" + "\x8c\x45\x12\xc5\x7a\xcf\xbb\x99" + "\x25\xaa\xa3\x9b\x3f\xf1\xfc\x9d" + "\xfa\x2c\x26\x9b\x92\x47\x61\x6b" + "\x63\x1e\x41\x67\xcb\xb7\x0f\x52" + "\x70\xd4\x0d\x7e\xef\x34\xa2\x75" + "\x4f\x6a\x55\x9c\x2b\x4a\x02\xdd" + "\x96\x5d\xcb\xca\x45\xa1\xec\xaa", + .len = 128, + }, { + .key = "\x2a\xed\x7d\x76\xfc\xc5\x49\x50" + 
"\xf4\x90\x0f\xcc\x5d\xff\x0c\x3c" + "\x14\x06\xaf\x68\x8f\xd7\xb6\x25" + "\x1e\x10\x95\x2a\x71\x33\x17\x20", + .klen = 32, + .iv = "\x5b\x58\x47\xf8\xd5\x1e\x91\x81" + "\x46\xe7\x25\x3a\x02\x45\x9c\x65", + .ptext = "\x10\xaf\xde\x5c\x30\x79\x43\x28" + "\x1c\x03\xf8\x50\x0f\x30\xa5\xef" + "\x84\x19\x4c\x09\x40\x03\x75\x1f" + "\x92\x8f\x88\x01\xda\x31\x7a\xe4" + "\x48\xe3\xab\xb4\xe6\x1b\x0f\xac" + "\xd9\xfa\x8d\x23\xe4\xc6\xa4\xa9" + "\x2d\x9a\x54\x52\x44\x5c\x3c\x52" + "\x61\xf0\x00\xca\xed\xab\xed\xe2" + "\x44\x0b\xe0\x18\xba\xa5\x63\xd8" + "\xdc\x5e\x1a\x4c\xf8\xde\x5e\x75" + "\xdf\x42\x27\x7b\xe9\x11\x2f\x41" + "\x3a\x72\x54\x3d\x44\x9c\x3e\x87" + "\x8d\x8d\x43\x2f\xb2\xff\x87\xd4" + "\xad\x98\x68\x72\x53\x61\x19\x7c" + "\x20\x79\x8c\x2b\x37\x0b\x96\x15" + "\xa5\x7d\x4e\x01\xe6\xea\xb6\xfa" + "\xaa\xd3\x9d\xa2\xd9\x11\xc3\xc9" + "\xd4\x0e\x3f\x3e\xfe\x35\x1e\xe5", + .ctext = "\xb0\x2b\x75\x5f\x33\x1b\x05\x49" + "\x06\xf1\x43\x91\xc2\x85\xfa\xac" + "\x3f\x47\xf3\x89\x73\xb2\x0e\xa4" + "\x30\xcb\x87\x39\x53\x5d\x36\x89" + "\x77\xd9\x17\x01\x95\xa6\xe9\x71" + "\x51\x53\xd9\x4f\xa6\xc2\x79\x3d" + "\x2e\x50\x90\x52\x0d\x27\x1a\x46" + "\xf1\xe8\x6e\x7e\x7b\x32\xe5\x22" + "\x22\x1f\xba\x5e\xcf\x25\x6b\x26" + "\x76\xf0\xca\x8e\xdd\x5b\xd3\x09" + "\x6f\x82\x08\x56\x1f\x51\x72\x57" + "\xca\xd1\x60\x07\xfb\x9f\x71\x54" + "\x0f\xf6\x48\x71\xfa\x8f\xcb\xdd" + "\xce\xd3\x16\xcd\xae\x0e\x67\x5e" + "\xea\x8d\xa2\x4a\x4f\x11\xc8\xc8" + "\x2f\x04\xfe\xa8\x2a\x07\x1c\xb1" + "\x77\x39\xda\x8b\xd9\x5c\x94\x6c" + "\x4d\x4d\x13\x51\x6f\x07\x06\x5b", + .len = 144, + }, { + .key = "\x7b\xa7\x4d\x0a\x37\x30\xb9\xf5" + "\x2a\x79\xb4\xbf\xdb\x7f\x9b\x64" + "\x23\x43\xb5\x18\x34\xc4\x5f\xdf" + "\xd9\x2a\x66\x58\x00\x44\xb5\xd9", + .klen = 32, + .iv = "\x75\x34\x30\xc1\xf0\x69\xdf\x0a" + "\x52\xce\x4f\x1e\x2c\x41\x35\xec", + .ptext = "\x81\x47\x55\x3a\xcd\xfe\xa2\x3d" + "\x45\x53\xa7\x67\x61\x74\x25\x80" + "\x98\x89\xfe\xf8\x6a\x9f\x51\x7c" + "\xa4\xe4\xe7\xc7\xe0\x1a\xce\xbb" + "\x4b\x46\x43\xb0\xab\xa8\xd6\x0c" + "\xa0\xf0\xc8\x13\x29\xaf\xb8\x01" + "\x6b\x0c\x7e\x56\xae\xb8\x58\x72" + "\xa9\x24\x44\x61\xff\xf1\xac\xf8" + "\x09\xa8\x48\x21\xd6\xab\x41\x73" + "\x70\x6b\x92\x06\x61\xdc\xb4\x85" + "\x76\x26\x7a\x84\xc3\x9e\x3a\x14" + "\xe7\xf4\x2d\x95\x92\xad\x18\xcc" + "\x44\xd4\x2c\x36\x57\xed\x2b\x9b" + "\x3f\x2b\xcd\xe5\x11\xe3\x62\x33" + "\x42\x3f\xb8\x2a\xb1\x37\x3f\x8b" + "\xe8\xbd\x6b\x0b\x9f\x38\x5a\x5f" + "\x82\x34\xb7\x96\x35\x58\xde\xab" + "\x94\x98\x41\x5b\x3f\xac\x0a\x34" + "\x56\xc0\x02\xef\x81\x6d\xb1\xff" + "\x34\xe8\xc7\x6a\x31\x79\xba\xd8", + .ctext = "\x4e\x00\x7c\x52\x45\x76\xf9\x3d" + "\x1a\xd1\x72\xbc\xb9\x0f\xa9\xfb" + "\x0e\x5b\xe2\x3c\xc7\xae\x92\xf6" + "\xb8\x0b\x0a\x95\x40\xe9\x7f\xe0" + "\x54\x10\xf9\xf6\x23\x1f\x51\xc8" + "\x16\x8b\x2e\x79\xe1\x8c\x0b\x43" + "\xe5\xeb\xb5\x9d\x1e\xc3\x28\x07" + "\x5c\x8d\xb1\xe7\x80\xd3\xce\x62" + "\x8d\xf8\x31\x1f\x29\x8b\x90\xee" + "\xe5\xc3\xfa\x16\xc4\xf0\xc3\x99" + "\xe9\x5e\x19\xba\x37\xb8\xc0\x87" + "\xb5\xc6\xc9\x31\xcb\x6e\x30\xce" + "\x03\x1d\xfe\xce\x08\x32\x00\xeb" + "\x86\xc4\xfb\x48\x01\xda\x93\x73" + "\xcc\xb7\xae\x4e\x94\x20\xeb\xc7" + "\xe3\x33\x4c\xeb\xed\xe2\xfc\x86" + "\x0e\x73\x32\xf9\x1b\xf3\x25\xf3" + "\x74\xad\xd1\xf4\x2c\x45\xa4\xfd" + "\x52\x40\xa2\x4e\xa5\x62\xf6\x02" + "\xbb\xb0\xe3\x23\x86\x67\xb8\xf6", + .len = 160, + } +}; + +static const struct aead_testvec aria_gcm_tv_template[] = { + { + .key = "\xe9\x1e\x5e\x75\xda\x65\x55\x4a" + "\x48\x18\x1f\x38\x46\x34\x95\x62", + .klen = 16, + .iv = 
"\x00\x00\x20\xe8\xf5\xeb\x00\x00" + "\x00\x00\x31\x5e", + .assoc = "\x80\x08\x31\x5e\xbf\x2e\x6f\xe0" + "\x20\xe8\xf5\xeb", + .alen = 12, + .ptext = "\xf5\x7a\xf5\xfd\x4a\xe1\x95\x62" + "\x97\x6e\xc5\x7a\x5a\x7a\xd5\x5a" + "\x5a\xf5\xc5\xe5\xc5\xfd\xf5\xc5" + "\x5a\xd5\x7a\x4a\x72\x72\xd5\x72" + "\x62\xe9\x72\x95\x66\xed\x66\xe9" + "\x7a\xc5\x4a\x4a\x5a\x7a\xd5\xe1" + "\x5a\xe5\xfd\xd5\xfd\x5a\xc5\xd5" + "\x6a\xe5\x6a\xd5\xc5\x72\xd5\x4a" + "\xe5\x4a\xc5\x5a\x95\x6a\xfd\x6a" + "\xed\x5a\x4a\xc5\x62\x95\x7a\x95" + "\x16\x99\x16\x91\xd5\x72\xfd\x14" + "\xe9\x7a\xe9\x62\xed\x7a\x9f\x4a" + "\x95\x5a\xf5\x72\xe1\x62\xf5\x7a" + "\x95\x66\x66\xe1\x7a\xe1\xf5\x4a" + "\x95\xf5\x66\xd5\x4a\x66\xe1\x6e" + "\x4a\xfd\x6a\x9f\x7a\xe1\xc5\xc5" + "\x5a\xe5\xd5\x6a\xfd\xe9\x16\xc5" + "\xe9\x4a\x6e\xc5\x66\x95\xe1\x4a" + "\xfd\xe1\x14\x84\x16\xe9\x4a\xd5" + "\x7a\xc5\x14\x6e\xd5\x9d\x1c\xc5", + .plen = 160, + .ctext = "\x4d\x8a\x9a\x06\x75\x55\x0c\x70" + "\x4b\x17\xd8\xc9\xdd\xc8\x1a\x5c" + "\xd6\xf7\xda\x34\xf2\xfe\x1b\x3d" + "\xb7\xcb\x3d\xfb\x96\x97\x10\x2e" + "\xa0\xf3\xc1\xfc\x2d\xbc\x87\x3d" + "\x44\xbc\xee\xae\x8e\x44\x42\x97" + "\x4b\xa2\x1f\xf6\x78\x9d\x32\x72" + "\x61\x3f\xb9\x63\x1a\x7c\xf3\xf1" + "\x4b\xac\xbe\xb4\x21\x63\x3a\x90" + "\xff\xbe\x58\xc2\xfa\x6b\xdc\xa5" + "\x34\xf1\x0d\x0d\xe0\x50\x2c\xe1" + "\xd5\x31\xb6\x33\x6e\x58\x87\x82" + "\x78\x53\x1e\x5c\x22\xbc\x6c\x85" + "\xbb\xd7\x84\xd7\x8d\x9e\x68\x0a" + "\xa1\x90\x31\xaa\xf8\x91\x01\xd6" + "\x69\xd7\xa3\x96\x5c\x1f\x7e\x16" + "\x22\x9d\x74\x63\xe0\x53\x5f\x4e" + "\x25\x3f\x5d\x18\x18\x7d\x40\xb8" + "\xae\x0f\x56\x4b\xd9\x70\xb5\xe7" + "\xe2\xad\xfb\x21\x1e\x89\xa9\x53" + "\x5a\xba\xce\x3f\x37\xf5\xa7\x36" + "\xf4\xbe\x98\x4b\xbf\xfb\xed\xc1", + .clen = 176, + }, { + .key = "\x0c\x5f\xfd\x37\xa1\x1e\xdc\x42" + "\xc3\x25\x28\x7f\xc0\x60\x4f\x2e" + "\x3e\x8c\xd5\x67\x1a\x00\xfe\x32" + "\x16\xaa\x5e\xb1\x05\x78\x3b\x54", + .klen = 32, + .iv = "\x00\x00\x20\xe8\xf5\xeb\x00\x00" + "\x00\x00\x31\x5e", + .assoc = "\x80\x08\x31\x5e\xbf\x2e\x6f\xe0" + "\x20\xe8\xf5\xeb", + .alen = 12, + .ptext = "\xf5\x7a\xf5\xfd\x4a\xe1\x95\x62" + "\x97\x6e\xc5\x7a\x5a\x7a\xd5\x5a" + "\x5a\xf5\xc5\xe5\xc5\xfd\xf5\xc5" + "\x5a\xd5\x7a\x4a\x72\x72\xd5\x72" + "\x62\xe9\x72\x95\x66\xed\x66\xe9" + "\x7a\xc5\x4a\x4a\x5a\x7a\xd5\xe1" + "\x5a\xe5\xfd\xd5\xfd\x5a\xc5\xd5" + "\x6a\xe5\x6a\xd5\xc5\x72\xd5\x4a" + "\xe5\x4a\xc5\x5a\x95\x6a\xfd\x6a" + "\xed\x5a\x4a\xc5\x62\x95\x7a\x95" + "\x16\x99\x16\x91\xd5\x72\xfd\x14" + "\xe9\x7a\xe9\x62\xed\x7a\x9f\x4a" + "\x95\x5a\xf5\x72\xe1\x62\xf5\x7a" + "\x95\x66\x66\xe1\x7a\xe1\xf5\x4a" + "\x95\xf5\x66\xd5\x4a\x66\xe1\x6e" + "\x4a\xfd\x6a\x9f\x7a\xe1\xc5\xc5" + "\x5a\xe5\xd5\x6a\xfd\xe9\x16\xc5" + "\xe9\x4a\x6e\xc5\x66\x95\xe1\x4a" + "\xfd\xe1\x14\x84\x16\xe9\x4a\xd5" + "\x7a\xc5\x14\x6e\xd5\x9d\x1c\xc5", + .plen = 160, + .ctext = "\x6f\x9e\x4b\xcb\xc8\xc8\x5f\xc0" + "\x12\x8f\xb1\xe4\xa0\xa2\x0c\xb9" + "\x93\x2f\xf7\x45\x81\xf5\x4f\xc0" + "\x13\xdd\x05\x4b\x19\xf9\x93\x71" + "\x42\x5b\x35\x2d\x97\xd3\xf3\x37" + "\xb9\x0b\x63\xd1\xb0\x82\xad\xee" + "\xea\x9d\x2d\x73\x91\x89\x7d\x59" + "\x1b\x98\x5e\x55\xfb\x50\xcb\x53" + "\x50\xcf\x7d\x38\xdc\x27\xdd\xa1" + "\x27\xc0\x78\xa1\x49\xc8\xeb\x98" + "\x08\x3d\x66\x36\x3a\x46\xe3\x72" + "\x6a\xf2\x17\xd3\xa0\x02\x75\xad" + "\x5b\xf7\x72\xc7\x61\x0e\xa4\xc2" + "\x30\x06\x87\x8f\x0e\xe6\x9a\x83" + "\x97\x70\x31\x69\xa4\x19\x30\x3f" + "\x40\xb7\x2e\x45\x73\x71\x4d\x19" + "\xe2\x69\x7d\xf6\x1e\x7c\x72\x52" + "\xe5\xab\xc6\xba\xde\x87\x6a\xc4" + "\x96\x1b\xfa\xc4\xd5\xe8\x67\xaf" + 
"\xca\x35\x1a\x48\xae\xd5\x28\x22" + "\xe2\x10\xd6\xce\xd2\xcf\x43\x0f" + "\xf8\x41\x47\x29\x15\xe7\xef\x48", + .clen = 176, + } +}; + static const struct cipher_testvec chacha20_tv_template[] = { { /* RFC7539 A.2. Test Vector #1 */ .key = "\x00\x00\x00\x00\x00\x00\x00\x00" @@ -30168,81 +33364,6 @@ static const struct comp_testvec deflate_decomp_tv_template[] = { }, }; -static const struct comp_testvec zlib_deflate_comp_tv_template[] = { - { - .inlen = 70, - .outlen = 44, - .input = "Join us now and share the software " - "Join us now and share the software ", - .output = "\x78\x5e\xf3\xca\xcf\xcc\x53\x28" - "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc" - "\x4b\x51\x28\xce\x48\x2c\x4a\x55" - "\x28\xc9\x48\x55\x28\xce\x4f\x2b" - "\x29\x07\x71\xbc\x08\x2b\x01\x00" - "\x7c\x65\x19\x3d", - }, { - .inlen = 191, - .outlen = 129, - .input = "This document describes a compression method based on the DEFLATE" - "compression algorithm. This document defines the application of " - "the DEFLATE algorithm to the IP Payload Compression Protocol.", - .output = "\x78\x5e\x5d\xce\x41\x0a\xc3\x30" - "\x0c\x04\xc0\xaf\xec\x0b\xf2\x87" - "\xd2\xa6\x50\xe8\xc1\x07\x7f\x40" - "\xb1\x95\x5a\x60\x5b\xc6\x56\x0f" - "\xfd\x7d\x93\x1e\x42\xe8\x51\xec" - "\xee\x20\x9f\x64\x20\x6a\x78\x17" - "\xae\x86\xc8\x23\x74\x59\x78\x80" - "\x10\xb4\xb4\xce\x63\x88\x56\x14" - "\xb6\xa4\x11\x0b\x0d\x8e\xd8\x6e" - "\x4b\x8c\xdb\x7c\x7f\x5e\xfc\x7c" - "\xae\x51\x7e\x69\x17\x4b\x65\x02" - "\xfc\x1f\xbc\x4a\xdd\xd8\x7d\x48" - "\xad\x65\x09\x64\x3b\xac\xeb\xd9" - "\xc2\x01\xc0\xf4\x17\x3c\x1c\x1c" - "\x7d\xb2\x52\xc4\xf5\xf4\x8f\xeb" - "\x6a\x1a\x34\x4f\x5f\x2e\x32\x45" - "\x4e", - }, -}; - -static const struct comp_testvec zlib_deflate_decomp_tv_template[] = { - { - .inlen = 128, - .outlen = 191, - .input = "\x78\x9c\x5d\x8d\x31\x0e\xc2\x30" - "\x10\x04\xbf\xb2\x2f\xc8\x1f\x10" - "\x04\x09\x89\xc2\x85\x3f\x70\xb1" - "\x2f\xf8\x24\xdb\x67\xd9\x47\xc1" - "\xef\x49\x68\x12\x51\xae\x76\x67" - "\xd6\x27\x19\x88\x1a\xde\x85\xab" - "\x21\xf2\x08\x5d\x16\x1e\x20\x04" - "\x2d\xad\xf3\x18\xa2\x15\x85\x2d" - "\x69\xc4\x42\x83\x23\xb6\x6c\x89" - "\x71\x9b\xef\xcf\x8b\x9f\xcf\x33" - "\xca\x2f\xed\x62\xa9\x4c\x80\xff" - "\x13\xaf\x52\x37\xed\x0e\x52\x6b" - "\x59\x02\xd9\x4e\xe8\x7a\x76\x1d" - "\x02\x98\xfe\x8a\x87\x83\xa3\x4f" - "\x56\x8a\xb8\x9e\x8e\x5c\x57\xd3" - "\xa0\x79\xfa\x02\x2e\x32\x45\x4e", - .output = "This document describes a compression method based on the DEFLATE" - "compression algorithm. This document defines the application of " - "the DEFLATE algorithm to the IP Payload Compression Protocol.", - }, { - .inlen = 44, - .outlen = 70, - .input = "\x78\x9c\xf3\xca\xcf\xcc\x53\x28" - "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc" - "\x4b\x51\x28\xce\x48\x2c\x4a\x55" - "\x28\xc9\x48\x55\x28\xce\x4f\x2b" - "\x29\x07\x71\xbc\x08\x2b\x01\x00" - "\x7c\x65\x19\x3d", - .output = "Join us now and share the software " - "Join us now and share the software ", - }, -}; - /* * LZO test vectors (null-terminated strings). 
*/ @@ -32435,221 +35556,1720 @@ static const struct hash_testvec blake2b_512_tv_template[] = {{ 0xae, 0x15, 0x81, 0x15, 0xd0, 0x88, 0xa0, 0x3c, }, }}; -static const struct hash_testvec blakes2s_128_tv_template[] = {{ - .digest = (u8[]){ 0x64, 0x55, 0x0d, 0x6f, 0xfe, 0x2c, 0x0a, 0x01, - 0xa1, 0x4a, 0xba, 0x1e, 0xad, 0xe0, 0x20, 0x0c, }, -}, { - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0xdc, 0x66, 0xca, 0x8f, 0x03, 0x86, 0x58, 0x01, - 0xb0, 0xff, 0xe0, 0x6e, 0xd8, 0xa1, 0xa9, 0x0e, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x88, 0x1e, 0x42, 0xe7, 0xbb, 0x35, 0x80, 0x82, - 0x63, 0x7c, 0x0a, 0x0f, 0xd7, 0xec, 0x6c, 0x2f, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0xcf, 0x9e, 0x07, 0x2a, 0xd5, 0x22, 0xf2, 0xcd, - 0xa2, 0xd8, 0x25, 0x21, 0x80, 0x86, 0x73, 0x1c, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0xf6, 0x33, 0x5a, 0x2c, 0x22, 0xa0, 0x64, 0xb2, - 0xb6, 0x3f, 0xeb, 0xbc, 0xd1, 0xc3, 0xe5, 0xb2, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x72, 0x66, 0x49, 0x60, 0xf9, 0x4a, 0xea, 0xbe, - 0x1f, 0xf4, 0x60, 0xce, 0xb7, 0x81, 0xcb, 0x09, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0xd5, 0xa4, 0x0e, 0xc3, 0x16, 0xc7, 0x51, 0xa6, - 0x3c, 0xd0, 0xd9, 0x11, 0x57, 0xfa, 0x1e, 0xbb, }, -}}; +/* + * Test vectors generated using https://github.com/google/hctr2 + */ +static const struct cipher_testvec aes_xctr_tv_template[] = { + { + .key = "\x9c\x8d\xc4\xbd\x71\x36\xdc\x82" + "\x7c\xa1\xca\xa3\x23\x5a\xdb\xa4", + .iv = "\x8d\xe7\xa5\x6a\x95\x86\x42\xde" + "\xba\xea\x6e\x69\x03\x33\x86\x0f", + .ptext = "\xbd", + .ctext = "\xb9", + .klen = 16, + .len = 1, + }, + { + .key = "\xbc\x1b\x12\x0c\x3f\x18\xcc\x1f" + "\x5a\x1d\xab\x81\xa8\x68\x7c\x63", + .iv = "\x22\xc1\xdd\x25\x0b\x18\xcb\xa5" + "\x4a\xda\x15\x07\x73\xd9\x88\x10", + .ptext = "\x24\x6e\x64\xc6\x15\x26\x9c\xda" + "\x2a\x4b\x57\x12\xff\x7c\xd6\xb5", + .ctext = "\xd6\x47\x8d\x58\x92\xb2\x84\xf9" + "\xb7\xee\x0d\x98\xa1\x39\x4d\x8f", + .klen = 16, + .len = 16, + }, + { + .key = "\x44\x03\xbf\x4c\x30\xf0\xa7\xd6" + "\xbd\x54\xbb\x66\x8e\xa6\x0e\x8a", + .iv = "\xe6\xf7\x26\xdf\x8c\x3c\xaa\x88" + "\xce\xc1\xbd\x43\x3b\x09\x62\xad", + .ptext = "\x3c\xe3\x46\xb9\x8f\x9d\x3f\x8d" + "\xef\xf2\x53\xab\x24\xe2\x29\x08" + "\xf8\x7e\x1d\xa6\x6d\x86\x7d\x60" + "\x97\x63\x93\x29\x71\x94\xb4", + .ctext = "\xd4\xa3\xc6\xb8\xc1\x6f\x70\x1a" + "\x52\x0c\xed\x4c\xaf\x51\x56\x23" + "\x48\x45\x07\x10\x34\xc5\xba\x71" + "\xe5\xf8\x1e\xd8\xcb\xa6\xe7", + .klen = 16, + .len = 31, + }, + { + .key = "\x5b\x17\x30\x94\x19\x31\xa1\xae" + "\x24\x8e\x42\x1e\x82\xe6\xec\xb8", + .iv = "\xd1\x2e\xb9\xb8\xf8\x49\xeb\x68" + "\x06\xeb\x65\x33\x34\xa2\xeb\xf0", + .ptext = "\x19\x75\xec\x59\x60\x1b\x7a\x3e" + "\x62\x46\x87\xf0\xde\xab\x81\x36" + "\x63\x53\x11\xa0\x1f\xce\x25\x85" + "\x49\x6b\x28\xfa\x1c\x92\xe5\x18" + "\x38\x14\x00\x79\xf2\x9e\xeb\xfc" + "\x36\xa7\x6b\xe1\xe5\xcf\x04\x48" + "\x44\x6d\xbd\x64\xb3\xcb\x78\x05" + "\x8d\x7f\x9a\xaf\x3c\xcf\x6c\x45" + "\x6c\x7c\x46\x4c\xa8\xc0\x1e\xe4" + "\x33\xa5\x7b\xbb\x26\xd9\xc0\x32" + "\x9d\x8a\xb3\xf3\x3d\x52\xe6\x48" + "\x4c\x9b\x4c\x6e\xa4\xa3\xad\x66" + 
"\x56\x48\xd5\x98\x3a\x93\xc4\x85" + "\xe9\x89\xca\xa6\xc1\xc8\xe7\xf8" + "\xc3\xe9\xef\xbe\x77\xe6\xd1\x3a" + "\xa6\x99\xc8\x2d\xdf\x40\x0f\x44", + .ctext = "\xc6\x1a\x01\x1a\x00\xba\x04\xff" + "\x10\xd1\x7e\x5d\xad\x91\xde\x8c" + "\x08\x55\x95\xae\xd7\x22\x77\x40" + "\xf0\x33\x1b\x51\xef\xfe\x3d\x67" + "\xdf\xc4\x9f\x39\x47\x67\x93\xab" + "\xaa\x37\x55\xfe\x41\xe0\xba\xcd" + "\x25\x02\x7c\x61\x51\xa1\xcc\x72" + "\x7a\x20\x26\xb9\x06\x68\xbd\x19" + "\xc5\x2e\x1b\x75\x4a\x40\xb2\xd2" + "\xc4\xee\xd8\x5b\xa4\x55\x7d\x25" + "\xfc\x01\x4d\x6f\x0a\xfd\x37\x5d" + "\x3e\x67\xc0\x35\x72\x53\x7b\xe2" + "\xd6\x19\x5b\x92\x6c\x3a\x8c\x2a" + "\xe2\xc2\xa2\x4f\x2a\xf2\xb5\x15" + "\x65\xc5\x8d\x97\xf9\xbf\x8c\x98" + "\xe4\x50\x1a\xf2\x76\x55\x07\x49", + .klen = 16, + .len = 128, + }, + { + .key = "\x17\xa6\x01\x3d\x5d\xd6\xef\x2d" + "\x69\x8f\x4c\x54\x5b\xae\x43\xf0", + .iv = "\xa9\x1b\x47\x60\x26\x82\xf7\x1c" + "\x80\xf8\x88\xdd\xfb\x44\xd9\xda", + .ptext = "\xf7\x67\xcd\xa6\x04\x65\x53\x99" + "\x90\x5c\xa2\x56\x74\xd7\x9d\xf2" + "\x0b\x03\x7f\x4e\xa7\x84\x72\x2b" + "\xf0\xa5\xbf\xe6\x9a\x62\x3a\xfe" + "\x69\x5c\x93\x79\x23\x86\x64\x85" + "\xeb\x13\xb1\x5a\xd5\x48\x39\xa0" + "\x70\xfb\x06\x9a\xd7\x12\x5a\xb9" + "\xbe\xed\x2c\x81\x64\xf7\xcf\x80" + "\xee\xe6\x28\x32\x2d\x37\x4c\x32" + "\xf4\x1f\x23\x21\xe9\xc8\xc9\xbf" + "\x54\xbc\xcf\xb4\xc2\x65\x39\xdf" + "\xa5\xfb\x14\x11\xed\x62\x38\xcf" + "\x9b\x58\x11\xdd\xe9\xbd\x37\x57" + "\x75\x4c\x9e\xd5\x67\x0a\x48\xc6" + "\x0d\x05\x4e\xb1\x06\xd7\xec\x2e" + "\x9e\x59\xde\x4f\xab\x38\xbb\xe5" + "\x87\x04\x5a\x2c\x2a\xa2\x8f\x3c" + "\xe7\xe1\x46\xa9\x49\x9f\x24\xad" + "\x2d\xb0\x55\x40\x64\xd5\xda\x7e" + "\x1e\x77\xb8\x29\x72\x73\xc3\x84" + "\xcd\xf3\x94\x90\x58\x76\xc9\x2c" + "\x2a\xad\x56\xde\x33\x18\xb6\x3b" + "\x10\xe9\xe9\x8d\xf0\xa9\x7f\x05" + "\xf7\xb5\x8c\x13\x7e\x11\x3d\x1e" + "\x02\xbb\x5b\xea\x69\xff\x85\xcf" + "\x6a\x18\x97\x45\xe3\x96\xba\x4d" + "\x2d\x7a\x70\x78\x15\x2c\xe9\xdc" + "\x4e\x09\x92\x57\x04\xd8\x0b\xa6" + "\x20\x71\x76\x47\x76\x96\x89\xa0" + "\xd9\x29\xa2\x5a\x06\xdb\x56\x39" + "\x60\x33\x59\x04\x95\x89\xf6\x18" + "\x1d\x70\x75\x85\x3a\xb7\x6e", + .ctext = "\xe1\xe7\x3f\xd3\x6a\xb9\x2f\x64" + "\x37\xc5\xa4\xe9\xca\x0a\xa1\xd6" + "\xea\x7d\x39\xe5\xe6\xcc\x80\x54" + "\x74\x31\x2a\x04\x33\x79\x8c\x8e" + "\x4d\x47\x84\x28\x27\x9b\x3c\x58" + "\x54\x58\x20\x4f\x70\x01\x52\x5b" + "\xac\x95\x61\x49\x5f\xef\xba\xce" + "\xd7\x74\x56\xe7\xbb\xe0\x3c\xd0" + "\x7f\xa9\x23\x57\x33\x2a\xf6\xcb" + "\xbe\x42\x14\x95\xa8\xf9\x7a\x7e" + "\x12\x53\x3a\xe2\x13\xfe\x2d\x89" + "\xeb\xac\xd7\xa8\xa5\xf8\x27\xf3" + "\x74\x9a\x65\x63\xd1\x98\x3a\x7e" + "\x27\x7b\xc0\x20\x00\x4d\xf4\xe5" + "\x7b\x69\xa6\xa8\x06\x50\x85\xb6" + "\x7f\xac\x7f\xda\x1f\xf5\x37\x56" + "\x9b\x2f\xd3\x86\x6b\x70\xbd\x0e" + "\x55\x9a\x9d\x4b\x08\xb5\x5b\x7b" + "\xd4\x7c\xb4\x71\x49\x92\x4a\x1e" + "\xed\x6d\x11\x09\x47\x72\x32\x6a" + "\x97\x53\x36\xaf\xf3\x06\x06\x2c" + "\x69\xf1\x59\x00\x36\x95\x28\x2a" + "\xb6\xcd\x10\x21\x84\x73\x5c\x96" + "\x86\x14\x2c\x3d\x02\xdb\x53\x9a" + "\x61\xde\xea\x99\x84\x7a\x27\xf6" + "\xf7\xc8\x49\x73\x4b\xb8\xeb\xd3" + "\x41\x33\xdd\x09\x68\xe2\x64\xb8" + "\x5f\x75\x74\x97\x91\x54\xda\xc2" + "\x73\x2c\x1e\x5a\x84\x48\x01\x1a" + "\x0d\x8b\x0a\xdf\x07\x2e\xee\x77" + "\x1d\x17\x41\x7a\xc9\x33\x63\xfa" + "\x9f\xc3\x74\x57\x5f\x03\x4c", + .klen = 16, + .len = 255, + }, + { + .key = "\xe5\xf1\x48\x2e\x88\xdb\xc7\x28" + "\xa2\x55\x5d\x2f\x90\x02\xdc\xd3" + "\xf5\xd3\x9e\x87\xd5\x58\x30\x4a", + .iv = "\xa6\x40\x39\xf9\x63\x6c\x2d\xd4" + 
"\x1b\x71\x05\xa4\x88\x86\x11\xd3", + .ptext = "\xb6\x06\xae\x15\x11\x96\xc1\x44" + "\x44\xc2\x98\xf9\xa8\x0a\x0b", + .ctext = "\x27\x3b\x68\x40\xa9\x5e\x74\x6b" + "\x74\x67\x18\xf9\x37\xed\xed", + .klen = 24, + .len = 15, + }, + { + .key = "\xc8\xa0\x27\x67\x04\x3f\xed\xa5" + "\xb4\x0c\x51\x91\x2d\x27\x77\x33" + "\xa5\xfc\x2a\x9f\x78\xd8\x1c\x68", + .iv = "\x83\x99\x1a\xe2\x84\xca\xa9\x16" + "\x8d\xc4\x2d\x1b\x67\xc8\x86\x21", + .ptext = "\xd6\x22\x85\xb8\x5d\x7e\x26\x2e" + "\xbe\x04\x9d\x0c\x03\x91\x45\x4a" + "\x36", + .ctext = "\x0f\x44\xa9\x62\x72\xec\x12\x26" + "\x3a\xc6\x83\x26\x62\x5e\xb7\x13" + "\x05", + .klen = 24, + .len = 17, + }, + { + .key = "\xc5\x87\x18\x09\x0a\x4e\x66\x3e" + "\x50\x90\x19\x93\xc0\x33\xcf\x80" + "\x3a\x36\x6b\x6c\x43\xd7\xe4\x93", + .iv = "\xdd\x0b\x75\x1f\xee\x2f\xb4\x52" + "\x10\x82\x1f\x79\x8a\xa4\x9b\x87", + .ptext = "\x56\xf9\x13\xce\x9f\x30\x10\x11" + "\x1b\x59\xfd\x39\x5a\x29\xa3\x44" + "\x78\x97\x8c\xf6\x99\x6d\x26\xf1" + "\x32\x60\x6a\xeb\x04\x47\x29\x4c" + "\x7e\x14\xef\x4d\x55\x29\xfe\x36" + "\x37\xcf\x0b\x6e\xf3\xce\x15\xd2", + .ctext = "\x8f\x98\xe1\x5a\x7f\xfe\xc7\x05" + "\x76\xb0\xd5\xde\x90\x52\x2b\xa8" + "\xf3\x6e\x3c\x77\xa5\x33\x63\xdd" + "\x6f\x62\x12\xb0\x80\x10\xc1\x28" + "\x58\xe5\xd6\x24\x44\x04\x55\xf3" + "\x6d\x94\xcb\x2c\x7e\x7a\x85\x79", + .klen = 24, + .len = 48, + }, + { + .key = "\x84\x9b\xe8\x10\x4c\xb3\xd1\x7a" + "\xb3\xab\x4e\x6f\x90\x12\x07\xf8" + "\xef\xde\x42\x09\xbf\x34\x95\xb2", + .iv = "\x66\x62\xf9\x48\x9d\x17\xf7\xdf" + "\x06\x67\xf4\x6d\xf2\xbc\xa2\xe5", + .ptext = "\x2f\xd6\x16\x6b\xf9\x4b\x44\x14" + "\x90\x93\xe5\xfd\x05\xaa\x00\x26" + "\xbd\xab\x11\xb8\xf0\xcb\x11\x72" + "\xdd\xc5\x15\x4f\x4e\x1b\xf8\xc9" + "\x8f\x4a\xd5\x69\xf8\x9e\xfb\x05" + "\x8a\x37\x46\xfe\xfa\x58\x9b\x0e" + "\x72\x90\x9a\x06\xa5\x42\xf4\x7c" + "\x35\xd5\x64\x70\x72\x67\xfc\x8b" + "\xab\x5a\x2f\x64\x9b\xa1\xec\xe7" + "\xe6\x92\x69\xdb\x62\xa4\xe7\x44" + "\x88\x28\xd4\x52\x64\x19\xa9\xd7" + "\x0c\x00\xe6\xe7\xc1\x28\xc1\xf5" + "\x72\xc5\xfa\x09\x22\x2e\xf4\x82" + "\xa3\xdc\xc1\x68\xf9\x29\x55\x8d" + "\x04\x67\x13\xa6\x52\x04\x3c\x0c" + "\x14\xf2\x87\x23\x61\xab\x82\xcb" + "\x49\x5b\x6b\xd4\x4f\x0d\xd4\x95" + "\x82\xcd\xe3\x69\x47\x1b\x31\x73" + "\x73\x77\xc1\x53\x7d\x43\x5e\x4a" + "\x80\x3a\xca\x9c\xc7\x04\x1a\x31" + "\x8e\xe6\x76\x7f\xe1\xb3\xd0\x57" + "\xa2\xb2\xf6\x09\x51\xc9\x6d\xbc" + "\x79\xed\x57\x50\x36\xd2\x93\xa4" + "\x40\x5d\xac\x3a\x3b\xb6\x2d\x89" + "\x78\xa2\xbd\x23\xec\x35\x06\xf0" + "\xa8\xc8\xc9\xb0\xe3\x28\x2b\xba" + "\x70\xa0\xfe\xed\x13\xc4\xd7\x90" + "\xb1\x6a\xe0\xe1\x30\x71\x15\xd0" + "\xe2\xb3\xa6\x4e\xb0\x01\xf9\xe7" + "\x59\xc6\x1e\xed\x46\x2b\xe3\xa8" + "\x22\xeb\x7f\x1c\xd9\xcd\xe0\xa6" + "\x72\x42\x2c\x06\x75\xbb\xb7\x6b" + "\xca\x49\x5e\xa1\x47\x8d\x9e\xfe" + "\x60\xcc\x34\x95\x8e\xfa\x1e\x3e" + "\x85\x4b\x03\x54\xea\x34\x1c\x41" + "\x90\x45\xa6\xbe\xcf\x58\x4f\xca" + "\x2c\x79\xc0\x3e\x8f\xd7\x3b\xd4" + "\x55\x74\xa8\xe1\x57\x09\xbf\xab" + "\x2c\xf9\xe4\xdd\x17\x99\x57\x60" + "\x4b\x88\x2a\x7f\x43\x86\xb9\x9a" + "\x60\xbf\x4c\xcf\x9b\x41\xb8\x99" + "\x69\x15\x4f\x91\x4d\xeb\xdf\x6f" + "\xcc\x4c\xf9\x6f\xf2\x33\x23\xe7" + "\x02\x44\xaa\xa2\xfa\xb1\x39\xa5" + "\xff\x88\xf5\x37\x02\x33\x24\xfc" + "\x79\x11\x4c\x94\xc2\x31\x87\x9c" + "\x53\x19\x99\x32\xe4\xde\x18\xf4" + "\x8f\xe2\xe8\xa3\xfb\x0b\xaa\x7c" + "\xdb\x83\x0f\xf6\xc0\x8a\x9b\xcd" + "\x7b\x16\x05\x5b\xe4\xb4\x34\x03" + "\xe3\x8f\xc9\x4b\x56\x84\x2a\x4c" + "\x36\x72\x3c\x84\x4f\xba\xa2\x7f" + "\xf7\x1b\xba\x4d\x8a\xb8\x5d\x51" + 
"\x36\xfb\xef\x23\x18\x6f\x33\x2d" + "\xbb\x06\x24\x8e\x33\x98\x6e\xcd" + "\x63\x11\x18\x6b\xcc\x1b\x66\xb9" + "\x38\x8d\x06\x8d\x98\x1a\xef\xaa" + "\x35\x4a\x90\xfa\xb1\xd3\xcc\x11" + "\x50\x4c\x54\x18\x60\x5d\xe4\x11" + "\xfc\x19\xe1\x53\x20\x5c\xe7\xef" + "\x8a\x2b\xa8\x82\x51\x5f\x5d\x43" + "\x34\xe5\xcf\x7b\x1b\x6f\x81\x19" + "\xb7\xdf\xa8\x9e\x81\x89\x5f\x33" + "\x69\xaf\xde\x89\x68\x88\xf0\x71", + .ctext = "\xab\x15\x46\x5b\xed\x4f\xa8\xac" + "\xbf\x31\x30\x84\x55\xa4\xb8\x98" + "\x79\xba\xa0\x15\xa4\x55\x20\xec" + "\xf9\x94\x71\xe6\x6a\x6f\xee\x87" + "\x2e\x3a\xa2\x95\xae\x6e\x56\x09" + "\xe9\xc0\x0f\xe2\xc6\xb7\x30\xa9" + "\x73\x8e\x59\x7c\xfd\xe3\x71\xf7" + "\xae\x8b\x91\xab\x5e\x36\xe9\xa8" + "\xff\x17\xfa\xa2\x94\x93\x11\x42" + "\x67\x96\x99\xc5\xf0\xad\x2a\x57" + "\xf9\xa6\x70\x4a\xdf\x71\xff\xc0" + "\xe2\xaf\x9a\xae\x57\x58\x13\x3b" + "\x2d\xf1\xc7\x8f\xdb\x8a\xcc\xce" + "\x53\x1a\x69\x55\x39\xc8\xbe\xc3" + "\x2d\xb1\x03\xd9\xa3\x99\xf4\x8d" + "\xd9\x2d\x27\xae\xa5\xe7\x77\x7f" + "\xbb\x88\x84\xea\xfa\x19\x3f\x44" + "\x61\x21\x8a\x1f\xbe\xac\x60\xb4" + "\xaf\xe9\x00\xab\xef\x3c\x53\x56" + "\xcd\x4b\x53\xd8\x9b\xfe\x88\x23" + "\x5b\x85\x76\x08\xec\xd1\x6e\x4a" + "\x87\xa4\x7d\x29\x4e\x4f\x3f\xc9" + "\xa4\xab\x63\xea\xdd\xef\x9f\x79" + "\x38\x18\x7d\x90\x90\xf9\x12\x57" + "\x1d\x89\xea\xfe\xd4\x47\x45\x32" + "\x6a\xf6\xe7\xde\x22\x7e\xee\xc1" + "\xbc\x2d\xc3\xbb\xe5\xd4\x13\xac" + "\x63\xff\x5b\xb1\x05\x96\xd5\xf3" + "\x07\x9a\x62\xb6\x30\xea\x7d\x1e" + "\xee\x75\x0a\x1b\xcc\x6e\x4d\xa7" + "\xf7\x4d\x74\xd8\x60\x32\x5e\xd0" + "\x93\xd7\x19\x90\x4e\x26\xdb\xe4" + "\x5e\xd4\xa8\xb9\x76\xba\x56\x91" + "\xc4\x75\x04\x1e\xc2\x77\x24\x6f" + "\xf9\xe8\x4a\xec\x7f\x86\x95\xb3" + "\x5c\x2c\x97\xab\xf0\xf7\x74\x5b" + "\x0b\xc2\xda\x42\x40\x34\x16\xed" + "\x06\xc1\x25\x53\x17\x0d\x81\x4e" + "\xe6\xf2\x0f\x6d\x94\x3c\x90\x7a" + "\xae\x20\xe9\x3f\xf8\x18\x67\x6a" + "\x49\x1e\x41\xb6\x46\xab\xc8\xa7" + "\xcb\x19\x96\xf5\x99\xc0\x66\x3e" + "\x77\xcf\x73\x52\x83\x2a\xe2\x48" + "\x27\x6c\xeb\xe7\xe7\xc4\xd5\x6a" + "\x40\x67\xbc\xbf\x6b\x3c\xf3\xbb" + "\x51\x5e\x31\xac\x03\x81\xab\x61" + "\xfa\xa5\xa6\x7d\x8b\xc3\x8a\x75" + "\x28\x7a\x71\x9c\xac\x8f\x76\xfc" + "\xf9\x6c\x5d\x9b\xd7\xf6\x36\x2d" + "\x61\xd5\x61\xaa\xdd\x01\xfc\x57" + "\x91\x10\xcd\xcd\x6d\x27\x63\x24" + "\x67\x46\x7a\xbb\x61\x56\x39\xb1" + "\xd6\x79\xfe\x77\xca\xd6\x73\x59" + "\x6e\x58\x11\x90\x03\x26\x74\x2a" + "\xfa\x52\x12\x47\xfb\x12\xeb\x3e" + "\x88\xf0\x52\x6c\xc0\x54\x7a\x88" + "\x8c\xe5\xde\x9e\xba\xb9\xf2\xe1" + "\x97\x2e\x5c\xbd\xf4\x13\x7e\xf3" + "\xc4\xe1\x87\xa5\x35\xfa\x7c\x71" + "\x1a\xc9\xf4\xa8\x57\xe2\x5a\x6b" + "\x14\xe0\x73\xaf\x56\x6b\xa0\x00" + "\x9e\x5f\x64\xac\x00\xfb\xc4\x92" + "\xe5\xe2\x8a\xb2\x9e\x75\x49\x85" + "\x25\x66\xa5\x1a\xf9\x7d\x1d\x60", + .klen = 24, + .len = 512, + }, + { + .key = "\x05\x60\x3a\x7e\x60\x90\x46\x18" + "\x6c\x60\xba\xeb\x12\xd7\xbe\xd1" + "\xd3\xf6\x10\x46\x9d\xf1\x0c\xb4" + "\x73\xe3\x93\x27\xa8\x2c\x13\xaa", + .iv = "\xf5\x96\xd1\xb6\xcb\x44\xd8\xd0" + "\x3e\xdb\x92\x80\x08\x94\xcd\xd3", + .ptext = "\x78", + .ctext = "\xc5", + .klen = 32, + .len = 1, + }, + { + .key = "\x35\xca\x38\xf3\xd9\xd6\x34\xef" + "\xcd\xee\xa3\x26\x86\xba\xfb\x45" + "\x01\xfa\x52\x67\xff\xc5\x9d\xaa" + "\x64\x9a\x05\xbb\x85\x20\xa7\xf2", + .iv = "\xe3\xda\xf5\xff\x42\x59\x87\x86" + "\xee\x7b\xd6\xb4\x6a\x25\x44\xff", + .ptext = "\x44\x67\x1e\x04\x53\xd2\x4b\xd9" + "\x96\x33\x07\x54\xe4\x8e\x20", + .ctext = "\xcc\x55\x40\x79\x47\x5c\x8b\xa6" + "\xca\x7b\x9f\x50\xe3\x21\xea", + .klen = 32, + .len = 15, 
+ }, + { + .key = "\xaf\xd9\x14\x14\xd5\xdb\xc9\xce" + "\x76\x5c\x5a\xbf\x43\x05\x29\x24" + "\xc4\x13\x68\xcc\xe8\x37\xbd\xb9" + "\x41\x20\xf5\x53\x48\xd0\xa2\xd6", + .iv = "\xa7\xb4\x00\x08\x79\x10\xae\xf5" + "\x02\xbf\x85\xb2\x69\x4c\xc6\x04", + .ptext = "\xac\x6a\xa8\x0c\xb0\x84\xbf\x4c" + "\xae\x94\x20\x58\x7e\x00\x93\x89", + .ctext = "\xd5\xaa\xe2\xe9\x86\x4c\x95\x4e" + "\xde\xb6\x15\xcb\xdc\x1f\x13\x38", + .klen = 32, + .len = 16, + }, + { + .key = "\xed\xe3\x8b\xe7\x1c\x17\xbf\x4a" + "\x02\xe2\xfc\x76\xac\xf5\x3c\x00" + "\x5d\xdc\xfc\x83\xeb\x45\xb4\xcb" + "\x59\x62\x60\xec\x69\x9c\x16\x45", + .iv = "\xe4\x0e\x2b\x90\xd2\xfa\x94\x2e" + "\x10\xe5\x64\x2b\x97\x28\x15\xc7", + .ptext = "\xe6\x53\xff\x60\x0e\xc4\x51\xe4" + "\x93\x4d\xe5\x55\xc5\xd9\xad\x48" + "\x52", + .ctext = "\xba\x25\x28\xf5\xcf\x31\x91\x80" + "\xda\x2b\x95\x5f\x20\xcb\xfb\x9f" + "\xc6", + .klen = 32, + .len = 17, + }, + { + .key = "\x77\x5c\xc0\x73\x9a\x64\x97\x91" + "\x2f\xee\xe0\x20\xc2\x04\x59\x2e" + "\x97\xd2\xa7\x70\xb3\xb0\x21\x6b" + "\x8f\xbf\xb8\x51\xa8\xea\x0f\x62", + .iv = "\x31\x8e\x1f\xcd\xfd\x23\xeb\x7f" + "\x8a\x1f\x1b\x23\x53\x27\x44\xe5", + .ptext = "\xcd\xff\x8c\x9b\x94\x5a\x51\x3f" + "\x40\x93\x56\x93\x66\x39\x63\x1f" + "\xbf\xe6\xa4\xfa\xbe\x79\x93\x03" + "\xf5\x66\x74\x16\xfc\xe4\xce", + .ctext = "\x8b\xd3\xc3\xce\x66\xf8\x66\x4c" + "\xad\xd6\xf5\x0f\xd8\x99\x5a\x75" + "\xa1\x3c\xab\x0b\x21\x36\x57\x72" + "\x88\x29\xe9\xea\x4a\x8d\xe9", + .klen = 32, + .len = 31, + }, + { + .key = "\xa1\x2f\x4d\xde\xfe\xa1\xff\xa8" + "\x73\xdd\xe3\xe2\x95\xfc\xea\x9c" + "\xd0\x80\x42\x0c\xb8\x43\x3e\x99" + "\x39\x38\x0a\x8c\xe8\x45\x3a\x7b", + .iv = "\x32\xc4\x6f\xb1\x14\x43\xd1\x87" + "\xe2\x6f\x5a\x58\x02\x36\x7e\x2a", + .ptext = "\x9e\x5c\x1e\xf1\xd6\x7d\x09\x57" + "\x18\x48\x55\xda\x7d\x44\xf9\x6d" + "\xac\xcd\x59\xbb\x10\xa2\x94\x67" + "\xd1\x6f\xfe\x6b\x4a\x11\xe8\x04" + "\x09\x26\x4f\x8d\x5d\xa1\x7b\x42" + "\xf9\x4b\x66\x76\x38\x12\xfe\xfe", + .ctext = "\x42\xbc\xa7\x64\x15\x9a\x04\x71" + "\x2c\x5f\x94\xba\x89\x3a\xad\xbc" + "\x87\xb3\xf4\x09\x4f\x57\x06\x18" + "\xdc\x84\x20\xf7\x64\x85\xca\x3b" + "\xab\xe6\x33\x56\x34\x60\x5d\x4b" + "\x2e\x16\x13\xd4\x77\xde\x2d\x2b", + .klen = 32, + .len = 48, + }, + { + .key = "\xfb\xf5\xb7\x3d\xa6\x95\x42\xbf" + "\xd2\x94\x6c\x74\x0f\xbc\x5a\x28" + "\x35\x3c\x51\x58\x84\xfb\x7d\x11" + "\x16\x1e\x00\x97\x37\x08\xb7\x16", + .iv = "\x9b\x53\x57\x40\xe6\xd9\xa7\x27" + "\x78\xd4\x9b\xd2\x29\x1d\x24\xa9", + .ptext = "\x8b\x02\x60\x0a\x3e\xb7\x10\x59" + "\xc3\xac\xd5\x2a\x75\x81\xf2\xdb" + "\x55\xca\x65\x86\x44\xfb\xfe\x91" + "\x26\xbb\x45\xb2\x46\x22\x3e\x08" + "\xa2\xbf\x46\xcb\x68\x7d\x45\x7b" + "\xa1\x6a\x3c\x6e\x25\xeb\xed\x31" + "\x7a\x8b\x47\xf9\xde\xec\x3d\x87" + "\x09\x20\x2e\xfa\xba\x8b\x9b\xc5" + "\x6c\x25\x9c\x9d\x2a\xe8\xab\x90" + "\x3f\x86\xee\x61\x13\x21\xd4\xde" + "\xe1\x0c\x95\xfc\x5c\x8a\x6e\x0a" + "\x73\xcf\x08\x69\x44\x4e\xde\x25" + "\xaf\xaa\x56\x04\xc4\xb3\x60\x44" + "\x3b\x8b\x3d\xee\xae\x42\x4b\xd2" + "\x9a\x6c\xa0\x8e\x52\x06\xb2\xd1" + "\x5d\x38\x30\x6d\x27\x9b\x1a\xd8", + .ctext = "\xa3\x78\x33\x78\x95\x95\x97\x07" + "\x53\xa3\xa1\x5b\x18\x32\x27\xf7" + "\x09\x12\x53\x70\x83\xb5\x6a\x9f" + "\x26\x6d\x10\x0d\xe0\x1c\xe6\x2b" + "\x70\x00\xdc\xa1\x60\xef\x1b\xee" + "\xc5\xa5\x51\x17\xae\xcc\xf2\xed" + "\xc4\x60\x07\xdf\xd5\x7a\xe9\x90" + "\x3c\x9f\x96\x5d\x72\x65\x5d\xef" + "\xd0\x94\x32\xc4\x85\x90\x78\xa1" + "\x2e\x64\xf6\xee\x8e\x74\x3f\x20" + "\x2f\x12\x3b\x3d\xd5\x39\x8e\x5a" + "\xf9\x8f\xce\x94\x5d\x82\x18\x66" + 
"\x14\xaf\x4c\xfe\xe0\x91\xc3\x4a" + "\x85\xcf\xe7\xe8\xf7\xcb\xf0\x31" + "\x88\x7d\xc9\x5b\x71\x9d\x5f\xd2" + "\xfa\xed\xa6\x24\xda\xbb\xb1\x84", + .klen = 32, + .len = 128, + }, + { + .key = "\x32\x37\x2b\x8f\x7b\xb1\x23\x79" + "\x05\x52\xde\x05\xf1\x68\x3f\x6c" + "\xa4\xae\xbc\x21\xc2\xc6\xf0\xbd" + "\x0f\x20\xb7\xa4\xc5\x05\x7b\x64", + .iv = "\xff\x26\x4e\x67\x48\xdd\xcf\xfe" + "\x42\x09\x04\x98\x5f\x1e\xfa\x80", + .ptext = "\x99\xdc\x3b\x19\x41\xf9\xff\x6e" + "\x76\xb5\x03\xfa\x61\xed\xf8\x44" + "\x70\xb9\xf0\x83\x80\x6e\x31\x77" + "\x77\xe4\xc7\xb4\x77\x02\xab\x91" + "\x82\xc6\xf8\x7c\x46\x61\x03\x69" + "\x09\xa0\xf7\x12\xb7\x81\x6c\xa9" + "\x10\x5c\xbb\x55\xb3\x44\xed\xb5" + "\xa2\x52\x48\x71\x90\x5d\xda\x40" + "\x0b\x7f\x4a\x11\x6d\xa7\x3d\x8e" + "\x1b\xcd\x9d\x4e\x75\x8b\x7d\x87" + "\xe5\x39\x34\x32\x1e\xe6\x8d\x51" + "\xd4\x1f\xe3\x1d\x50\xa0\x22\x37" + "\x7c\xb0\xd9\xfb\xb6\xb2\x16\xf6" + "\x6d\x26\xa0\x4e\x8c\x6a\xe6\xb6" + "\xbe\x4c\x7c\xe3\x88\x10\x18\x90" + "\x11\x50\x19\x90\xe7\x19\x3f\xd0" + "\x31\x15\x0f\x06\x96\xfe\xa7\x7b" + "\xc3\x32\x88\x69\xa4\x12\xe3\x64" + "\x02\x30\x17\x74\x6c\x88\x7c\x9b" + "\xd6\x6d\x75\xdf\x11\x86\x70\x79" + "\x48\x7d\x34\x3e\x33\x58\x07\x8b" + "\xd2\x50\xac\x35\x15\x45\x05\xb4" + "\x4d\x31\x97\x19\x87\x23\x4b\x87" + "\x53\xdc\xa9\x19\x78\xf1\xbf\x35" + "\x30\x04\x14\xd4\xcf\xb2\x8c\x87" + "\x7d\xdb\x69\xc9\xcd\xfe\x40\x3e" + "\x8d\x66\x5b\x61\xe5\xf0\x2d\x87" + "\x93\x3a\x0c\x2b\x04\x98\x05\xc2" + "\x56\x4d\xc4\x6c\xcd\x7a\x98\x7e" + "\xe2\x2d\x79\x07\x91\x9f\xdf\x2f" + "\x72\xc9\x8f\xcb\x0b\x87\x1b\xb7" + "\x04\x86\xcb\x47\xfa\x5d\x03", + .ctext = "\x0b\x00\xf7\xf2\xc8\x6a\xba\x9a" + "\x0a\x97\x18\x7a\x00\xa0\xdb\xf4" + "\x5e\x8e\x4a\xb7\xe0\x51\xf1\x75" + "\x17\x8b\xb4\xf1\x56\x11\x05\x9f" + "\x2f\x2e\xba\x67\x04\xe1\xb4\xa5" + "\xfc\x7c\x8c\xad\xc6\xb9\xd1\x64" + "\xca\xbd\x5d\xaf\xdb\x65\x48\x4f" + "\x1b\xb3\x94\x5c\x0b\xd0\xee\xcd" + "\xb5\x7f\x43\x8a\xd8\x8b\x66\xde" + "\xd2\x9c\x13\x65\xa4\x47\xa7\x03" + "\xc5\xa1\x46\x8f\x2f\x84\xbc\xef" + "\x48\x9d\x9d\xb5\xbd\x43\xff\xd2" + "\xd2\x7a\x5a\x13\xbf\xb4\xf6\x05" + "\x17\xcd\x01\x12\xf0\x35\x27\x96" + "\xf4\xc1\x65\xf7\x69\xef\x64\x1b" + "\x6e\x4a\xe8\x77\xce\x83\x01\xb7" + "\x60\xe6\x45\x2a\xcd\x41\x4a\xb5" + "\x8e\xcc\x45\x93\xf1\xd6\x64\x5f" + "\x32\x60\xe4\x29\x4a\x82\x6c\x86" + "\x16\xe4\xcc\xdb\x5f\xc8\x11\xa6" + "\xfe\x88\xd6\xc3\xe5\x5c\xbb\x67" + "\xec\xa5\x7b\xf5\xa8\x4f\x77\x25" + "\x5d\x0c\x2a\x99\xf9\xb9\xd1\xae" + "\x3c\x83\x2a\x93\x9b\x66\xec\x68" + "\x2c\x93\x02\x8a\x8a\x1e\x2f\x50" + "\x09\x37\x19\x5c\x2a\x3a\xc2\xcb" + "\xcb\x89\x82\x81\xb7\xbb\xef\x73" + "\x8b\xc9\xae\x42\x96\xef\x70\xc0" + "\x89\xc7\x3e\x6a\x26\xc3\xe4\x39" + "\x53\xa9\xcf\x63\x7d\x05\xf3\xff" + "\x52\x04\xf6\x7f\x23\x96\xe9\xf7" + "\xff\xd6\x50\xa3\x0e\x20\x71", + .klen = 32, + .len = 255, + }, + { + .key = "\x39\x5f\xf4\x9c\x90\x3a\x9a\x25" + "\x15\x11\x79\x39\xed\x26\x5e\xf6" + "\xda\xcf\x33\x4f\x82\x97\xab\x10" + "\xc1\x55\x48\x82\x80\xa8\x02\xb2", + .iv = "\x82\x60\xd9\x06\xeb\x40\x99\x76" + "\x08\xc5\xa4\x83\x45\xb8\x38\x5a", + .ptext = "\xa1\xa8\xac\xac\x08\xaf\x8f\x84" + "\xbf\xcc\x79\x31\x5e\x61\x01\xd1" + "\x4d\x5f\x9b\xcd\x91\x92\x9a\xa1" + "\x99\x0d\x49\xb2\xd7\xfd\x25\x93" + "\x51\x96\xbd\x91\x8b\x08\xf1\xc6" + "\x0d\x17\xf6\xef\xfd\xd2\x78\x16" + "\xc8\x08\x27\x7b\xca\x98\xc6\x12" + "\x86\x11\xdb\xd5\x08\x3d\x5a\x2c" + "\xcf\x15\x0e\x9b\x42\x78\xeb\x1f" + "\x52\xbc\xd7\x5a\x8a\x33\x6c\x14" + "\xfc\x61\xad\x2e\x1e\x03\x66\xea" + "\x79\x0e\x88\x88\xde\x93\xe3\x81" + 
"\xb5\xc4\x1c\xe6\x9c\x08\x18\x8e" + "\xa0\x87\xda\xe6\xf8\xcb\x30\x44" + "\x2d\x4e\xc0\xa3\x60\xf9\x62\x7b" + "\x4b\xd5\x61\x6d\xe2\x67\x95\x54" + "\x10\xd1\xca\x22\xe8\xb6\xb1\x3a" + "\x2d\xd7\x35\x5b\x22\x88\x55\x67" + "\x3d\x83\x8f\x07\x98\xa8\xf2\xcf" + "\x04\xb7\x9e\x52\xca\xe0\x98\x72" + "\x5c\xc1\x00\xd4\x1f\x2c\x61\xf3" + "\xe8\x40\xaf\x4a\xee\x66\x41\xa0" + "\x02\x77\x29\x30\x65\x59\x4b\x20" + "\x7b\x0d\x80\x97\x27\x7f\xd5\x90" + "\xbb\x9d\x76\x90\xe5\x43\x43\x72" + "\xd0\xd4\x14\x75\x66\xb3\xb6\xaf" + "\x09\xe4\x23\xb0\x62\xad\x17\x28" + "\x39\x26\xab\xf5\xf7\x5c\xb6\x33" + "\xbd\x27\x09\x5b\x29\xe4\x40\x0b" + "\xc1\x26\x32\xdb\x9a\xdf\xf9\x5a" + "\xae\x03\x2c\xa4\x40\x84\x9a\xb7" + "\x4e\x47\xa8\x0f\x23\xc7\xbb\xcf" + "\x2b\xf2\x32\x6c\x35\x6a\x91\xba" + "\x0e\xea\xa2\x8b\x2f\xbd\xb5\xea" + "\x6e\xbc\xb5\x4b\x03\xb3\x86\xe0" + "\x86\xcf\xba\xcb\x38\x2c\x32\xa6" + "\x6d\xe5\x28\xa6\xad\xd2\x7f\x73" + "\x43\x14\xf8\xb1\x99\x12\x2d\x2b" + "\xdf\xcd\xf2\x81\x43\x94\xdf\xb1" + "\x17\xc9\x33\xa6\x3d\xef\x96\xb8" + "\xd6\x0d\x00\xec\x49\x66\x85\x5d" + "\x44\x62\x12\x04\x55\x5c\x48\xd3" + "\xbd\x73\xac\x54\x8f\xbf\x97\x8e" + "\x85\xfd\xc2\xa1\x25\x32\x38\x6a" + "\x1f\xac\x57\x3c\x4f\x56\x73\xf2" + "\x1d\xb6\x48\x68\xc7\x0c\xe7\x60" + "\xd2\x8e\x4d\xfb\xc7\x20\x7b\xb7" + "\x45\x28\x12\xc6\x26\xae\xea\x7c" + "\x5d\xe2\x46\xb5\xae\xe1\xc3\x98" + "\x6f\x72\xd5\xa2\xfd\xed\x40\xfd" + "\xf9\xdf\x61\xec\x45\x2c\x15\xe0" + "\x1e\xbb\xde\x71\x37\x5f\x73\xc2" + "\x11\xcc\x6e\x6d\xe1\xb5\x1b\xd2" + "\x2a\xdd\x19\x8a\xc2\xe1\xa0\xa4" + "\x26\xeb\xb2\x2c\x4f\x77\x52\xf1" + "\x42\x72\x6c\xad\xd7\x78\x5d\x72" + "\xc9\x16\x26\x25\x1b\x4c\xe6\x58" + "\x79\x57\xb5\x06\x15\x4f\xe5\xba" + "\xa2\x7f\x2d\x5b\x87\x8a\x44\x70" + "\xec\xc7\xef\x84\xae\x60\xa2\x61" + "\x86\xe9\x18\xcd\x28\xc4\xa4\xf5" + "\xbc\x84\xb8\x86\xa0\xba\xf1\xf1" + "\x08\x3b\x32\x75\x35\x22\x7a\x65" + "\xca\x48\xe8\xef\x6e\xe2\x8e\x00", + .ctext = "\x2f\xae\xd8\x67\xeb\x15\xde\x75" + "\x53\xa3\x0e\x5a\xcf\x1c\xbe\xea" + "\xde\xf9\xcf\xc2\x9f\xfd\x0f\x44" + "\xc0\xe0\x7a\x76\x1d\xcb\x4a\xf8" + "\x35\xd6\xe3\x95\x98\x6b\x3f\x89" + "\xc4\xe6\xb6\x6f\xe1\x8b\x39\x4b" + "\x1c\x6c\x77\xe4\xe1\x8a\xbc\x61" + "\x00\x6a\xb1\x37\x2f\x45\xe6\x04" + "\x52\x0b\xfc\x1e\x32\xc1\xd8\x9d" + "\xfa\xdd\x67\x5c\xe0\x75\x83\xd0" + "\x21\x9e\x02\xea\xc0\x7f\xc0\x29" + "\xb3\x6c\xa5\x97\xb3\x29\x82\x1a" + "\x94\xa5\xb4\xb6\x49\xe5\xa5\xad" + "\x95\x40\x52\x7c\x84\x88\xa4\xa8" + "\x26\xe4\xd9\x5d\x41\xf2\x93\x7b" + "\xa4\x48\x1b\x66\x91\xb9\x7c\xc2" + "\x99\x29\xdf\xd8\x30\xac\xd4\x47" + "\x42\xa0\x14\x87\x67\xb8\xfd\x0b" + "\x1e\xcb\x5e\x5c\x9a\xc2\x04\x8b" + "\x17\x29\x9d\x99\x7f\x86\x4c\xe2" + "\x5c\x96\xa6\x0f\xb6\x47\x33\x5c" + "\xe4\x50\x49\xd5\x4f\x92\x0b\x9a" + "\xbc\x52\x4c\x41\xf5\xc9\x3e\x76" + "\x55\x55\xd4\xdc\x71\x14\x23\xfc" + "\x5f\xd5\x08\xde\xa0\xf7\x28\xc0" + "\xe1\x61\xac\x64\x66\xf6\xd1\x31" + "\xe4\xa4\xa9\xed\xbc\xad\x4f\x3b" + "\x59\xb9\x48\x1b\xe7\xb1\x6f\xc6" + "\xba\x40\x1c\x0b\xe7\x2f\x31\x65" + "\x85\xf5\xe9\x14\x0a\x31\xf5\xf3" + "\xc0\x1c\x20\x35\x73\x38\x0f\x8e" + "\x39\xf0\x68\xae\x08\x9c\x87\x4b" + "\x42\xfc\x22\x17\xee\x96\x51\x2a" + "\xd8\x57\x5a\x35\xea\x72\x74\xfc" + "\xb3\x0e\x69\x9a\xe1\x4f\x24\x90" + "\xc5\x4b\xe5\xd7\xe3\x82\x2f\xc5" + "\x62\x46\x3e\xab\x72\x4e\xe0\xf3" + "\x90\x09\x4c\xb2\xe1\xe8\xa0\xf5" + "\x46\x40\x2b\x47\x85\x3c\x21\x90" + "\x3d\xad\x25\x5a\x36\xdf\xe5\xbc" + "\x7e\x80\x4d\x53\x77\xf1\x79\xa6" + "\xec\x22\x80\x88\x68\xd6\x2d\x8b" + "\x3e\xf7\x52\xc7\x2a\x20\x42\x5c" + 
"\xed\x99\x4f\x32\x80\x00\x7e\x73" + "\xd7\x6d\x7f\x7d\x42\x54\x4a\xfe" + "\xff\x6f\x61\xca\x2a\xbb\x4f\xeb" + "\x4f\xe4\x4e\xaf\x2c\x4f\x82\xcd" + "\xa1\xa7\x11\xb3\x34\x33\xcf\x32" + "\x63\x0e\x24\x3a\x35\xbe\x06\xd5" + "\x17\xcb\x02\x30\x33\x6e\x8c\x49" + "\x40\x6e\x34\x8c\x07\xd4\x3e\xe6" + "\xaf\x78\x6d\x8c\x10\x5f\x21\x58" + "\x49\x26\xc5\xaf\x0d\x7d\xd4\xaf" + "\xcd\x5b\xa1\xe3\xf6\x39\x1c\x9b" + "\x8e\x00\xa1\xa7\x9e\x17\x4a\xc0" + "\x54\x56\x9e\xcf\xcf\x88\x79\x8d" + "\x50\xf7\x56\x8e\x0a\x73\x46\x6b" + "\xc3\xb9\x9b\x6c\x7d\xc4\xc8\xb6" + "\x03\x5f\x30\x62\x7d\xe6\xdb\x15" + "\xe1\x39\x02\x8c\xff\xda\xc8\x43" + "\xf2\xa9\xbf\x00\xe7\x3a\x61\x89" + "\xdf\xb0\xca\x7d\x8c\x8a\x6a\x9f" + "\x18\x89\x3d\x39\xac\x36\x6f\x05" + "\x1f\xb5\xda\x00\xea\xe1\x51\x21", + .klen = 32, + .len = 512, + }, -static const struct hash_testvec blakes2s_160_tv_template[] = {{ - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0xb4, 0xf2, 0x03, 0x49, 0x37, 0xed, 0xb1, 0x3e, - 0x5b, 0x2a, 0xca, 0x64, 0x82, 0x74, 0xf6, 0x62, - 0xe3, 0xf2, 0x84, 0xff, }, -}, { - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0xaa, 0x56, 0x9b, 0xdc, 0x98, 0x17, 0x75, 0xf2, - 0xb3, 0x68, 0x83, 0xb7, 0x9b, 0x8d, 0x48, 0xb1, - 0x9b, 0x2d, 0x35, 0x05, }, -}, { - .ksize = 1, - .key = "B", - .digest = (u8[]){ 0x50, 0x16, 0xe7, 0x0c, 0x01, 0xd0, 0xd3, 0xc3, - 0xf4, 0x3e, 0xb1, 0x6e, 0x97, 0xa9, 0x4e, 0xd1, - 0x79, 0x65, 0x32, 0x93, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x1c, 0x2b, 0xcd, 0x9a, 0x68, 0xca, 0x8c, 0x71, - 0x90, 0x29, 0x6c, 0x54, 0xfa, 0x56, 0x4a, 0xef, - 0xa2, 0x3a, 0x56, 0x9c, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0x36, 0xc3, 0x5f, 0x9a, 0xdc, 0x7e, 0xbf, 0x19, - 0x68, 0xaa, 0xca, 0xd8, 0x81, 0xbf, 0x09, 0x34, - 0x83, 0x39, 0x0f, 0x30, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0x86, 0x80, 0x78, 0xa4, 0x14, 0xec, 0x03, 0xe5, - 0xb6, 0x9a, 0x52, 0x0e, 0x42, 0xee, 0x39, 0x9d, - 0xac, 0xa6, 0x81, 0x63, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x2d, 0xd8, 0xd2, 0x53, 0x66, 0xfa, 0xa9, 0x01, - 0x1c, 0x9c, 0xaf, 0xa3, 0xe2, 0x9d, 0x9b, 0x10, - 0x0a, 0xf6, 0x73, 0xe8, }, -}}; +}; -static const struct hash_testvec blakes2s_224_tv_template[] = {{ - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x61, 0xb9, 0x4e, 0xc9, 0x46, 0x22, 0xa3, 0x91, - 0xd2, 0xae, 0x42, 0xe6, 0x45, 0x6c, 0x90, 0x12, - 0xd5, 0x80, 0x07, 0x97, 0xb8, 0x86, 0x5a, 0xfc, - 0x48, 0x21, 0x97, 0xbb, }, -}, { - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x9e, 0xda, 0xc7, 0x20, 0x2c, 0xd8, 0x48, 0x2e, - 0x31, 0x94, 0xab, 0x46, 0x6d, 0x94, 0xd8, 0xb4, - 0x69, 0xcd, 0xae, 0x19, 0x6d, 0x9e, 0x41, 0xcc, - 0x2b, 0xa4, 0xd5, 0xf6, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .digest = (u8[]){ 0x32, 0xc0, 0xac, 0xf4, 0x3b, 0xd3, 0x07, 0x9f, - 0xbe, 0xfb, 0xfa, 0x4d, 0x6b, 0x4e, 0x56, 0xb3, - 0xaa, 0xd3, 0x27, 0xf6, 0x14, 0xbf, 0xb9, 0x32, - 0xa7, 0x19, 0xfc, 0xb8, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0x73, 0xad, 0x5e, 0x6d, 0xb9, 0x02, 0x8e, 0x76, - 0xf2, 0x66, 0x42, 0x4b, 0x4c, 0xfa, 0x1f, 0xe6, - 0x2e, 0x56, 0x40, 0xe5, 0xa2, 0xb0, 
0x3c, 0xe8, - 0x7b, 0x45, 0xfe, 0x05, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0x16, 0x60, 0xfb, 0x92, 0x54, 0xb3, 0x6e, 0x36, - 0x81, 0xf4, 0x16, 0x41, 0xc3, 0x3d, 0xd3, 0x43, - 0x84, 0xed, 0x10, 0x6f, 0x65, 0x80, 0x7a, 0x3e, - 0x25, 0xab, 0xc5, 0x02, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0xca, 0xaa, 0x39, 0x67, 0x9c, 0xf7, 0x6b, 0xc7, - 0xb6, 0x82, 0xca, 0x0e, 0x65, 0x36, 0x5b, 0x7c, - 0x24, 0x00, 0xfa, 0x5f, 0xda, 0x06, 0x91, 0x93, - 0x6a, 0x31, 0x83, 0xb5, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0x90, 0x02, 0x26, 0xb5, 0x06, 0x9c, 0x36, 0x86, - 0x94, 0x91, 0x90, 0x1e, 0x7d, 0x2a, 0x71, 0xb2, - 0x48, 0xb5, 0xe8, 0x16, 0xfd, 0x64, 0x33, 0x45, - 0xb3, 0xd7, 0xec, 0xcc, }, -}}; +/* + * Test vectors generated using https://github.com/google/hctr2 + */ +static const struct cipher_testvec aes_hctr2_tv_template[] = { + { + .key = "\xe1\x15\x66\x3c\x8d\xc6\x3a\xff" + "\xef\x41\xd7\x47\xa2\xcc\x8a\xba", + .iv = "\xc3\xbe\x2a\xcb\xb5\x39\x86\xf1" + "\x91\xad\x6c\xf4\xde\x74\x45\x63" + "\x5c\x7a\xd5\xcc\x8b\x76\xef\x0e" + "\xcf\x2c\x60\x69\x37\xfd\x07\x96", + .ptext = "\x65\x75\xae\xd3\xe2\xbc\x43\x5c" + "\xb3\x1a\xd8\x05\xc3\xd0\x56\x29", + .ctext = "\x11\x91\xea\x74\x58\xcc\xd5\xa2" + "\xd0\x55\x9e\x3d\xfe\x7f\xc8\xfe", + .klen = 16, + .len = 16, + }, + { + .key = "\xe7\xd1\x77\x48\x76\x0b\xcd\x34" + "\x2a\x2d\xe7\x74\xca\x11\x9c\xae", + .iv = "\x71\x1c\x49\x62\xd9\x5b\x50\x5e" + "\x68\x87\xbc\xf6\x89\xff\xed\x30" + "\xe4\xe5\xbd\xb6\x10\x4f\x9f\x66" + "\x28\x06\x5a\xf4\x27\x35\xcd\xe5", + .ptext = "\x87\x03\x8f\x06\xa8\x61\x54\xda" + "\x01\x45\xd4\x01\xef\x4a\x22\xcf" + "\x78\x15\x9f\xbd\x64\xbd\x2c\xb9" + "\x40\x1d\x72\xae\x53\x63\xa5", + .ctext = "\x4e\xa1\x05\x27\xb8\x45\xe4\xa1" + "\xbb\x30\xb4\xa6\x12\x74\x63\xd6" + "\x17\xc9\xcc\x2f\x18\x64\xe0\x06" + "\x0a\xa0\xff\x72\x10\x7b\x22", + .klen = 16, + .len = 31, + }, + { + .key = "\x59\x65\x3b\x1d\x43\x5e\xc0\xae" + "\xb8\x9d\x9b\xdd\x22\x03\xbf\xca", + .iv = "\xec\x95\xfa\x5a\xcf\x5e\xd2\x93" + "\xa3\xb5\xe5\xbe\xf3\x01\x7b\x01" + "\xd1\xca\x6c\x06\x82\xf0\xbd\x67" + "\xd9\x6c\xa4\xdc\xb4\x38\x0f\x74", + .ptext = "\x45\xdf\x75\x87\xbc\x72\xce\x55" + "\xc9\xfa\xcb\xfc\x9f\x40\x82\x2b" + "\xc6\x4f\x4f\x5b\x8b\x3b\x6d\x67" + "\xa6\x93\x62\x89\x8c\x19\xf4\xe3" + "\x08\x92\x9c\xc9\x47\x2c\x6e\xd0" + "\xa3\x02\x2b\xdb\x2c\xf2\x8d\x46" + "\xcd\xb0\x9d\x26\x63\x4c\x40\x6b" + "\x79\x43\xe5\xce\x42\xa8\xec\x3b" + "\x5b\xd0\xea\xa4\xe6\xdb\x66\x55" + "\x7a\x76\xec\xab\x7d\x2a\x2b\xbd" + "\xa9\xab\x22\x64\x1a\xa1\xae\x84" + "\x86\x79\x67\xe9\xb2\x50\xbe\x12" + "\x2f\xb2\x14\xf0\xdb\x71\xd8\xa7" + "\x41\x8a\x88\xa0\x6a\x6e\x9d\x2a" + "\xfa\x11\x37\x40\x32\x09\x4c\x47" + "\x41\x07\x31\x85\x3d\xa8\xf7\x64", + .ctext = "\x2d\x4b\x9f\x93\xca\x5a\x48\x26" + "\x01\xcc\x54\xe4\x31\x50\x12\xf0" + "\x49\xff\x59\x42\x68\xbd\x87\x8f" + "\x9e\x62\x96\xcd\xb9\x24\x57\xa4" + "\x0b\x7b\xf5\x2e\x0e\xa8\x65\x07" + "\xab\x05\xd5\xca\xe7\x9c\x6c\x34" + "\x5d\x42\x34\xa4\x62\xe9\x75\x48" + "\x3d\x9e\x8f\xfa\x42\xe9\x75\x08" + "\x4e\x54\x91\x2b\xbd\x11\x0f\x8e" + "\xf0\x82\xf5\x24\xf1\xc4\xfc\xae" + "\x42\x54\x7f\xce\x15\xa8\xb2\x33" + "\xc0\x86\xb6\x2b\xe8\x44\xce\x1f" + "\x68\x57\x66\x94\x6e\xad\xeb\xf3" + "\x30\xf8\x11\xbd\x60\x00\xc6\xd5" + "\x4c\x81\xf1\x20\x2b\x4a\x5b\x99" + "\x79\x3b\xc9\x5c\x74\x23\xe6\x5d", + 
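
A note on how vectors like the aes_hctr2 entries here are consumed: HCTR2 is a length-preserving wide-block mode (a POLYVAL hash pass, one ECB block-cipher call, an XCTR bulk pass, then a second POLYVAL pass), so .ctext is always exactly .len bytes with no authentication tag, and the 32-byte .iv is really the tweak. A testmgr-style checker therefore only has to run the cipher forward and compare buffers. A hypothetical user-space mirror of that loop; struct tv, check_one() and the encrypt callback signature are invented for the sketch (the kernel does this through the skcipher API instead):

#include <stdint.h>
#include <string.h>

struct tv {			/* mirrors the fields used above */
	const char *key, *iv, *ptext, *ctext;
	unsigned int klen, len;	/* .len == ptext and ctext length */
};

typedef int (*encrypt_fn_t)(const uint8_t *key, unsigned int klen,
			    const uint8_t *iv, const uint8_t *in,
			    uint8_t *out, unsigned int len);

static int check_one(const struct tv *v, encrypt_fn_t encrypt)
{
	uint8_t buf[512];	/* the largest .len in this table */

	if (v->len > sizeof(buf) ||
	    encrypt((const uint8_t *)v->key, v->klen,
		    (const uint8_t *)v->iv,
		    (const uint8_t *)v->ptext, buf, v->len))
		return -1;
	return memcmp(buf, v->ctext, v->len) ? -1 : 0;
}
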
.klen = 16, + .len = 128, + }, + { + .key = "\x3e\x08\x5d\x64\x6c\x98\xec\xec" + "\x70\x0e\x0d\xa1\x41\x20\x99\x82", + .iv = "\x11\xb7\x77\x91\x0d\x99\xd9\x8d" + "\x35\x3a\xf7\x14\x6b\x09\x37\xe5" + "\xad\x51\xf6\xc3\x96\x4b\x64\x56" + "\xa8\xbd\x81\xcc\xbe\x94\xaf\xe4", + .ptext = "\xff\x8d\xb9\xc0\xe3\x69\xb3\xb2" + "\x8b\x11\x26\xb3\x11\xec\xfb\xb9" + "\x9c\xc1\x71\xd6\xe3\x26\x0e\xe0" + "\x68\x40\x60\xb9\x3a\x63\x56\x8a" + "\x9e\xc1\xf0\x10\xb1\x64\x32\x70" + "\xf8\xcd\xc6\xc4\x49\x4c\xe1\xce" + "\xf3\xe1\x03\xf8\x35\xae\xe0\x5e" + "\xef\x5f\xbc\x41\x75\x26\x13\xcc" + "\x37\x85\xdf\xc0\x5d\xa6\x47\x98" + "\xf1\x97\x52\x58\x04\xe6\xb5\x01" + "\xc0\xb8\x17\x6d\x74\xbd\x9a\xdf" + "\xa4\x37\x94\x86\xb0\x13\x83\x28" + "\xc9\xa2\x07\x3f\xb5\xb2\x72\x40" + "\x0e\x60\xdf\x57\x07\xb7\x2c\x66" + "\x10\x3f\x8d\xdd\x30\x0a\x47\xd5" + "\xe8\x9d\xfb\xa1\xaf\x53\xd7\x05" + "\xc7\xd2\xba\xe7\x2c\xa0\xbf\xb8" + "\xd1\x93\xe7\x41\x82\xa3\x41\x3a" + "\xaf\x12\xd6\xf8\x34\xda\x92\x46" + "\xad\xa2\x2f\xf6\x7e\x46\x96\xd8" + "\x03\xf3\x49\x64\xde\xd8\x06\x8b" + "\xa0\xbc\x63\x35\x38\xb6\x6b\xda" + "\x5b\x50\x3f\x13\xa5\x84\x1b\x1b" + "\x66\x89\x95\xb7\xc2\x16\x3c\xe9" + "\x24\xb0\x8c\x6f\x49\xef\xf7\x28" + "\x6a\x24\xfd\xbe\x25\xe2\xb4\x90" + "\x77\x44\x08\xb8\xda\xd2\xde\x2c" + "\xa0\x57\x45\x57\x29\x47\x6b\x89" + "\x4a\xf6\xa7\x2a\xc3\x9e\x7b\xc8" + "\xfd\x9f\x89\xab\xee\x6d\xa3\xb4" + "\x23\x90\x7a\xe9\x89\xa0\xc7\xb3" + "\x17\x41\x87\x91\xfc\x97\x42", + .ctext = "\xfc\x9b\x96\x66\xc4\x82\x2a\x4a" + "\xb1\x24\xba\xc7\x78\x5f\x79\xc1" + "\x57\x2e\x47\x29\x4d\x7b\xd2\x9a" + "\xbd\xc6\xc1\x26\x7b\x8e\x3f\x5d" + "\xd4\xb4\x9f\x6a\x02\x24\x4a\xad" + "\x0c\x00\x1b\xdf\x92\xc5\x8a\xe1" + "\x77\x79\xcc\xd5\x20\xbf\x83\xf4" + "\x4b\xad\x11\xbf\xdb\x47\x65\x70" + "\x43\xf3\x65\xdf\xb7\xdc\xb2\xb9" + "\xaa\x3f\xb3\xdf\x79\x69\x0d\xa0" + "\x86\x1c\xba\x48\x0b\x01\xc1\x88" + "\xdf\x03\xb1\x06\x3c\x1d\x56\xa1" + "\x8e\x98\xc1\xa6\x95\xa2\x5b\x72" + "\x76\x59\xd2\x26\x25\xcd\xef\x7c" + "\xc9\x60\xea\x43\xd1\x12\x8a\x8a" + "\x63\x12\x78\xcb\x2f\x88\x1e\x88" + "\x78\x59\xde\xba\x4d\x2c\x78\x61" + "\x75\x37\x54\xfd\x80\xc7\x5e\x98" + "\xcf\x14\x62\x8e\xfb\x72\xee\x4d" + "\x9f\xaf\x8b\x09\xe5\x21\x0a\x91" + "\x8f\x88\x87\xd5\xb1\x84\xab\x18" + "\x08\x57\xed\x72\x35\xa6\x0e\xc6" + "\xff\xcb\xfe\x2c\x48\x39\x14\x44" + "\xba\x59\x32\x3a\x2d\xc4\x5f\xcb" + "\xbe\x68\x8e\x7b\xee\x21\xa4\x32" + "\x11\xa0\x99\xfd\x90\xde\x59\x43" + "\xeb\xed\xd5\x87\x68\x46\xc6\xde" + "\x0b\x07\x17\x59\x6a\xab\xca\x15" + "\x65\x02\x01\xb6\x71\x8c\x3b\xaa" + "\x18\x3b\x30\xae\x38\x5b\x2c\x74" + "\xd4\xee\x4a\xfc\xf7\x1b\x09\xd4" + "\xda\x8b\x1d\x5d\x6f\x21\x6c", + .klen = 16, + .len = 255, + }, + { + .key = "\x24\xf6\xe1\x62\xe5\xaf\x99\xda" + "\x84\xec\x41\xb0\xa3\x0b\xd5\xa8" + "\xa0\x3e\x7b\xa6\xdd\x6c\x8f\xa8", + .iv = "\x7f\x80\x24\x62\x32\xdd\xab\x66" + "\xf2\x87\x29\x24\xec\xd2\x4b\x9f" + "\x0c\x33\x52\xd9\xe0\xcc\x6e\xe4" + "\x90\x85\x43\x97\xc4\x62\x14\x33", + .ptext = "\xef\x58\xe7\x7f\xa9\xd9\xb8\xd7" + "\xa2\x91\x97\x07\x27\x9e\xba\xe8" + "\xaa", + .ctext = "\xd7\xc3\x81\x91\xf2\x40\x17\x73" + "\x3e\x3b\x1c\x2a\x8e\x11\x9c\x17" + "\xf1", + .klen = 24, + .len = 17, + }, + { + .key = "\xbf\xaf\xd7\x67\x8c\x47\xcf\x21" + "\x8a\xa5\xdd\x32\x25\x47\xbe\x4f" + "\xf1\x3a\x0b\xa6\xaa\x2d\xcf\x09", + .iv = "\xd9\xe8\xf0\x92\x4e\xfc\x1d\xf2" + "\x81\x37\x7c\x8f\xf1\x59\x09\x20" + "\xf4\x46\x51\x86\x4f\x54\x8b\x32" + "\x58\xd1\x99\x8b\x8c\x03\xeb\x5d", + .ptext = "\xcd\x64\x90\xf9\x7c\xe5\x0e\x5a" + "\x75\xe7\x8e\x39\x86\xec\x20\x43" + 
"\x8a\x49\x09\x15\x47\xf4\x3c\x89" + "\x21\xeb\xcf\x4e\xcf\x91\xb5\x40" + "\xcd\xe5\x4d\x5c\x6f\xf2\xd2\x80" + "\xfa\xab\xb3\x76\x9f\x7f\x84\x0a", + .ctext = "\x44\x98\x64\x15\xb7\x0b\x80\xa3" + "\xb9\xca\x23\xff\x3b\x0b\x68\x74" + "\xbb\x3e\x20\x19\x9f\x28\x71\x2a" + "\x48\x3c\x7c\xe2\xef\xb5\x10\xac" + "\x82\x9f\xcd\x08\x8f\x6b\x16\x6f" + "\xc3\xbb\x07\xfb\x3c\xb0\x1b\x27", + .klen = 24, + .len = 48, + }, + { + .key = "\xb8\x35\xa2\x5f\x86\xbb\x82\x99" + "\x27\xeb\x01\x3f\x92\xaf\x80\x24" + "\x4c\x66\xa2\x89\xff\x2e\xa2\x25", + .iv = "\x0a\x1d\x96\xd3\xe0\xe8\x0c\x9b" + "\x9d\x6f\x21\x97\xc2\x17\xdb\x39" + "\x3f\xd8\x64\x48\x80\x04\xee\x43" + "\x02\xce\x88\xe2\x81\x81\x5f\x81", + .ptext = "\xb8\xf9\x16\x8b\x25\x68\xd0\x9c" + "\xd2\x28\xac\xa8\x79\xc2\x30\xc1" + "\x31\xde\x1c\x37\x1b\xa2\xb5\xe6" + "\xf0\xd0\xf8\x9c\x7f\xc6\x46\x07" + "\x5c\xc3\x06\xe4\xf0\x02\xec\xf8" + "\x59\x7c\xc2\x5d\xf8\x0c\x21\xae" + "\x9e\x82\xb1\x1a\x5f\x78\x44\x15" + "\x00\xa7\x2e\x52\xc5\x98\x98\x35" + "\x03\xae\xd0\x8e\x07\x57\xe2\x5a" + "\x17\xbf\x52\x40\x54\x5b\x74\xe5" + "\x2d\x35\xaf\x9e\x37\xf7\x7e\x4a" + "\x8c\x9e\xa1\xdc\x40\xb4\x5b\x36" + "\xdc\x3a\x68\xe6\xb7\x35\x0b\x8a" + "\x90\xec\x74\x8f\x09\x9a\x7f\x02" + "\x4d\x03\x46\x35\x62\xb1\xbd\x08" + "\x3f\x54\x2a\x10\x0b\xdc\x69\xaf" + "\x25\x3a\x0c\x5f\xe0\x51\xe7\x11" + "\xb7\x00\xab\xbb\x9a\xb0\xdc\x4d" + "\xc3\x7d\x1a\x6e\xd1\x09\x52\xbd" + "\x6b\x43\x55\x22\x3a\x78\x14\x7d" + "\x79\xfd\x8d\xfc\x9b\x1d\x0f\xa2" + "\xc7\xb9\xf8\x87\xd5\x96\x50\x61" + "\xa7\x5e\x1e\x57\x97\xe0\xad\x2f" + "\x93\xe6\xe8\x83\xec\x85\x26\x5e" + "\xd9\x2a\x15\xe0\xe9\x09\x25\xa1" + "\x77\x2b\x88\xdc\xa4\xa5\x48\xb6" + "\xf7\xcc\xa6\xa9\xba\xf3\x42\x5c" + "\x70\x9d\xe9\x29\xc1\xf1\x33\xdd" + "\x56\x48\x17\x86\x14\x51\x5c\x10" + "\xab\xfd\xd3\x26\x8c\x21\xf5\x93" + "\x1b\xeb\x47\x97\x73\xbb\x88\x10" + "\xf3\xfe\xf5\xde\xf3\x2e\x05\x46" + "\x1c\x0d\xa3\x10\x48\x9c\x71\x16" + "\x78\x33\x4d\x0a\x74\x3b\xe9\x34" + "\x0b\xa7\x0e\x9e\x61\xe9\xe9\xfd" + "\x85\xa0\xcb\x19\xfd\x7c\x33\xe3" + "\x0e\xce\xc2\x6f\x9d\xa4\x2d\x77" + "\xfd\xad\xee\x5e\x08\x3e\xd7\xf5" + "\xfb\xc3\xd7\x93\x96\x08\x96\xca" + "\x58\x81\x16\x9b\x98\x0a\xe2\xef" + "\x7f\xda\x40\xe4\x1f\x46\x9e\x67" + "\x2b\x84\xcb\x42\xc4\xd6\x6a\xcf" + "\x2d\xb2\x33\xc0\x56\xb3\x35\x6f" + "\x29\x36\x8f\x6a\x5b\xec\xd5\x4f" + "\xa0\x70\xff\xb6\x5b\xde\x6a\x93" + "\x20\x3c\xe2\x76\x7a\xef\x3c\x79" + "\x31\x65\xce\x3a\x0e\xd0\xbe\xa8" + "\x21\x95\xc7\x2b\x62\x8e\x67\xdd" + "\x20\x79\xe4\xe5\x01\x15\xc0\xec" + "\x0f\xd9\x23\xc8\xca\xdf\xd4\x7d" + "\x1d\xf8\x64\x4f\x56\xb1\x83\xa7" + "\x43\xbe\xfc\xcf\xc2\x8c\x33\xda" + "\x36\xd0\x52\xef\x9e\x9e\x88\xf4" + "\xa8\x21\x0f\xaa\xee\x8d\xa0\x24" + "\x4d\xcb\xb1\x72\x07\xf0\xc2\x06" + "\x60\x65\x85\x84\x2c\x60\xcf\x61" + "\xe7\x56\x43\x5b\x2b\x50\x74\xfa" + "\xdb\x4e\xea\x88\xd4\xb3\x83\x8f" + "\x6f\x97\x4b\x57\x7a\x64\x64\xae" + "\x0a\x37\x66\xc5\x03\xad\xb5\xf9" + "\x08\xb0\x3a\x74\xde\x97\x51\xff" + "\x48\x4f\x5c\xa4\xf8\x7a\xb4\x05" + "\x27\x70\x52\x86\x1b\x78\xfc\x18" + "\x06\x27\xa9\x62\xf7\xda\xd2\x8e", + .ctext = "\x3b\xe1\xdb\xb3\xc5\x9a\xde\x69" + "\x58\x05\xcc\xeb\x02\x51\x78\x4a" + "\xac\x28\xe9\xed\xd1\xc9\x15\x7d" + "\x33\x7d\xc1\x47\x12\x41\x11\xf8" + "\x4a\x2c\xb7\xa3\x41\xbe\x59\xf7" + "\x22\xdb\x2c\xda\x9c\x00\x61\x9b" + "\x73\xb3\x0b\x84\x2b\xc1\xf3\x80" + "\x84\xeb\x19\x60\x80\x09\xe1\xcd" + "\x16\x3a\x20\x23\xc4\x82\x4f\xba" + "\x3b\x8e\x55\xd7\xa9\x0b\x75\xd0" + "\xda\xce\xd2\xee\x7e\x4b\x7f\x65" + "\x4d\x28\xc5\xd3\x15\x2c\x40\x96" + 
"\x52\xd4\x18\x61\x2b\xe7\x83\xec" + "\x89\x62\x9c\x4c\x50\xe6\xe2\xbb" + "\x25\xa1\x0f\xa7\xb0\xb4\xb2\xde" + "\x54\x20\xae\xa3\x56\xa5\x26\x4c" + "\xd5\xcc\xe5\xcb\x28\x44\xb1\xef" + "\x67\x2e\x93\x6d\x00\x88\x83\x9a" + "\xf2\x1c\x48\x38\xec\x1a\x24\x90" + "\x73\x0a\xdb\xe8\xce\x95\x7a\x2c" + "\x8c\xe9\xb7\x07\x1d\xb3\xa3\x20" + "\xbe\xad\x61\x84\xac\xde\x76\xb5" + "\xa6\x28\x29\x47\x63\xc4\xfc\x13" + "\x3f\x71\xfb\x58\x37\x34\x82\xed" + "\x9e\x05\x19\x1f\xc1\x67\xc1\xab" + "\xf5\xfd\x7c\xea\xfa\xa4\xf8\x0a" + "\xac\x4c\x92\xdf\x65\x73\xd7\xdb" + "\xed\x2c\xe0\x84\x5f\x57\x8c\x76" + "\x3e\x05\xc0\xc3\x68\x96\x95\x0b" + "\x88\x97\xfe\x2e\x99\xd5\xc2\xb9" + "\x53\x9f\xf3\x32\x10\x1f\x1f\x5d" + "\xdf\x21\x95\x70\x91\xe8\xa1\x3e" + "\x19\x3e\xb6\x0b\xa8\xdb\xf8\xd4" + "\x54\x27\xb8\xab\x5d\x78\x0c\xe6" + "\xb7\x08\xee\xa4\xb6\x6b\xeb\x5a" + "\x89\x69\x2b\xbd\xd4\x21\x5b\xbf" + "\x79\xbb\x0f\xff\xdb\x23\x9a\xeb" + "\x8d\xf2\xc4\x39\xb4\x90\x77\x6f" + "\x68\xe2\xb8\xf3\xf1\x65\x4f\xd5" + "\x24\x80\x06\xaf\x7c\x8d\x15\x0c" + "\xfd\x56\xe5\xe3\x01\xa5\xf7\x1c" + "\x31\xd6\xa2\x01\x1e\x59\xf9\xa9" + "\x42\xd5\xc2\x34\xda\x25\xde\xc6" + "\x5d\x38\xef\xd1\x4c\xc1\xd9\x1b" + "\x98\xfd\xcd\x57\x6f\xfd\x46\x91" + "\x90\x3d\x52\x2b\x2c\x7d\xcf\x71" + "\xcf\xd1\x77\x23\x71\x36\xb1\xce" + "\xc7\x5d\xf0\x5b\x44\x3d\x43\x71" + "\xac\xb8\xa0\x6a\xea\x89\x5c\xff" + "\x81\x73\xd4\x83\xd1\xc9\xe9\xe2" + "\xa8\xa6\x0f\x36\xe6\xaa\x57\xd4" + "\x27\xd2\xc9\xda\x94\x02\x1f\xfb" + "\xe1\xa1\x07\xbe\xe1\x1b\x15\x94" + "\x1e\xac\x2f\x57\xbb\x41\x22\xaf" + "\x60\x5e\xcc\x66\xcb\x16\x62\xab" + "\xb8\x7c\x99\xf4\x84\x93\x0c\xc2" + "\xa2\x49\xe4\xfd\x17\x55\xe1\xa6" + "\x8d\x5b\xc6\x1b\xc8\xac\xec\x11" + "\x33\xcf\xb0\xe8\xc7\x28\x4f\xb2" + "\x5c\xa6\xe2\x71\xab\x80\x0a\xa7" + "\x5c\x59\x50\x9f\x7a\x32\xb7\xe5" + "\x24\x9a\x8e\x25\x21\x2e\xb7\x18" + "\xd0\xf2\xe7\x27\x6f\xda\xc1\x00" + "\xd9\xa6\x03\x59\xac\x4b\xcb\xba", + .klen = 24, + .len = 512, + }, + { + .key = "\x9e\xeb\xb2\x49\x3c\x1c\xf5\xf4" + "\x6a\x99\xc2\xc4\xdf\xb1\xf4\xdd" + "\x75\x20\x57\xea\x2c\x4f\xcd\xb2" + "\xa5\x3d\x7b\x49\x1e\xab\xfd\x0f", + .iv = "\xdf\x63\xd4\xab\xd2\x49\xf3\xd8" + "\x33\x81\x37\x60\x7d\xfa\x73\x08" + "\xd8\x49\x6d\x80\xe8\x2f\x62\x54" + "\xeb\x0e\xa9\x39\x5b\x45\x7f\x8a", + .ptext = "\x67\xc9\xf2\x30\x84\x41\x8e\x43" + "\xfb\xf3\xb3\x3e\x79\x36\x7f\xe8", + .ctext = "\x27\x38\x78\x47\x16\xd9\x71\x35" + "\x2e\x7e\xdd\x7e\x43\x3c\xb8\x40", + .klen = 32, + .len = 16, + }, + { + .key = "\x93\xfa\x7e\xe2\x0e\x67\xc4\x39" + "\xe7\xca\x47\x95\x68\x9d\x5e\x5a" + "\x7c\x26\x19\xab\xc6\xca\x6a\x4c" + "\x45\xa6\x96\x42\xae\x6c\xff\xe7", + .iv = "\xea\x82\x47\x95\x3b\x22\xa1\x3a" + "\x6a\xca\x24\x4c\x50\x7e\x23\xcd" + "\x0e\x50\xe5\x41\xb6\x65\x29\xd8" + "\x30\x23\x00\xd2\x54\xa7\xd6\x56", + .ptext = "\xdb\x1f\x1f\xec\xad\x83\x6e\x5d" + "\x19\xa5\xf6\x3b\xb4\x93\x5a\x57" + "\x6f", + .ctext = "\xf1\x46\x6e\x9d\xb3\x01\xf0\x6b" + "\xc2\xac\x57\x88\x48\x6d\x40\x72" + "\x68", + .klen = 32, + .len = 17, + }, + { + .key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99" + "\x5f\x1a\x5a\x44\x1d\x92\x0f\x27" + "\xcc\x16\xd7\x2b\x85\x63\x99\xd3" + "\xba\x96\xa1\xdb\xd2\x60\x68\xda", + .iv = "\xef\x58\x69\xb1\x2c\x5e\x9a\x47" + "\x24\xc1\xb1\x69\xe1\x12\x93\x8f" + "\x43\x3d\x6d\x00\xdb\x5e\xd8\xd9" + "\x12\x9a\xfe\xd9\xff\x2d\xaa\xc4", + .ptext = "\x5e\xa8\x68\x19\x85\x98\x12\x23" + "\x26\x0a\xcc\xdb\x0a\x04\xb9\xdf" + "\x4d\xb3\x48\x7b\xb0\xe3\xc8\x19" + "\x43\x5a\x46\x06\x94\x2d\xf2", + .ctext = "\xdb\xfd\xc8\x03\xd0\xec\xc1\xfe" + 
"\xbd\x64\x37\xb8\x82\x43\x62\x4e" + "\x7e\x54\xa3\xe2\x24\xa7\x27\xe8" + "\xa4\xd5\xb3\x6c\xb2\x26\xb4", + .klen = 32, + .len = 31, + }, + { + .key = "\x03\x65\x03\x6e\x4d\xe6\xe8\x4e" + "\x8b\xbe\x22\x19\x48\x31\xee\xd9" + "\xa0\x91\x21\xbe\x62\x89\xde\x78" + "\xd9\xb0\x36\xa3\x3c\xce\x43\xd5", + .iv = "\xa9\xc3\x4b\xe7\x0f\xfc\x6d\xbf" + "\x56\x27\x21\x1c\xfc\xd6\x04\x10" + "\x5f\x43\xe2\x30\x35\x29\x6c\x10" + "\x90\xf1\xbf\x61\xed\x0f\x8a\x91", + .ptext = "\x07\xaa\x02\x26\xb4\x98\x11\x5e" + "\x33\x41\x21\x51\x51\x63\x2c\x72" + "\x00\xab\x32\xa7\x1c\xc8\x3c\x9c" + "\x25\x0e\x8b\x9a\xdf\x85\xed\x2d" + "\xf4\xf2\xbc\x55\xca\x92\x6d\x22" + "\xfd\x22\x3b\x42\x4c\x0b\x74\xec", + .ctext = "\x7b\xb1\x43\x6d\xd8\x72\x6c\xf6" + "\x67\x6a\x00\xc4\xf1\xf0\xf5\xa4" + "\xfc\x60\x91\xab\x46\x0b\x15\xfc" + "\xd7\xc1\x28\x15\xa1\xfc\xf7\x68" + "\x8e\xcc\x27\x62\x00\x64\x56\x72" + "\xa6\x17\xd7\x3f\x67\x80\x10\x58", + .klen = 32, + .len = 48, + }, + { + .key = "\xa5\x28\x24\x34\x1a\x3c\xd8\xf7" + "\x05\x91\x8f\xee\x85\x1f\x35\x7f" + "\x80\x3d\xfc\x9b\x94\xf6\xfc\x9e" + "\x19\x09\x00\xa9\x04\x31\x4f\x11", + .iv = "\xa1\xba\x49\x95\xff\x34\x6d\xb8" + "\xcd\x87\x5d\x5e\xfd\xea\x85\xdb" + "\x8a\x7b\x5e\xb2\x5d\x57\xdd\x62" + "\xac\xa9\x8c\x41\x42\x94\x75\xb7", + .ptext = "\x69\xb4\xe8\x8c\x37\xe8\x67\x82" + "\xf1\xec\x5d\x04\xe5\x14\x91\x13" + "\xdf\xf2\x87\x1b\x69\x81\x1d\x71" + "\x70\x9e\x9c\x3b\xde\x49\x70\x11" + "\xa0\xa3\xdb\x0d\x54\x4f\x66\x69" + "\xd7\xdb\x80\xa7\x70\x92\x68\xce" + "\x81\x04\x2c\xc6\xab\xae\xe5\x60" + "\x15\xe9\x6f\xef\xaa\x8f\xa7\xa7" + "\x63\x8f\xf2\xf0\x77\xf1\xa8\xea" + "\xe1\xb7\x1f\x9e\xab\x9e\x4b\x3f" + "\x07\x87\x5b\x6f\xcd\xa8\xaf\xb9" + "\xfa\x70\x0b\x52\xb8\xa8\xa7\x9e" + "\x07\x5f\xa6\x0e\xb3\x9b\x79\x13" + "\x79\xc3\x3e\x8d\x1c\x2c\x68\xc8" + "\x51\x1d\x3c\x7b\x7d\x79\x77\x2a" + "\x56\x65\xc5\x54\x23\x28\xb0\x03", + .ctext = "\xeb\xf9\x98\x86\x3c\x40\x9f\x16" + "\x84\x01\xf9\x06\x0f\xeb\x3c\xa9" + "\x4c\xa4\x8e\x5d\xc3\x8d\xe5\xd3" + "\xae\xa6\xe6\xcc\xd6\x2d\x37\x4f" + "\x99\xc8\xa3\x21\x46\xb8\x69\xf2" + "\xe3\x14\x89\xd7\xb9\xf5\x9e\x4e" + "\x07\x93\x6f\x78\x8e\x6b\xea\x8f" + "\xfb\x43\xb8\x3e\x9b\x4c\x1d\x7e" + "\x20\x9a\xc5\x87\xee\xaf\xf6\xf9" + "\x46\xc5\x18\x8a\xe8\x69\xe7\x96" + "\x52\x55\x5f\x00\x1e\x1a\xdc\xcc" + "\x13\xa5\xee\xff\x4b\x27\xca\xdc" + "\x10\xa6\x48\x76\x98\x43\x94\xa3" + "\xc7\xe2\xc9\x65\x9b\x08\x14\x26" + "\x1d\x68\xfb\x15\x0a\x33\x49\x84" + "\x84\x33\x5a\x1b\x24\x46\x31\x92", + .klen = 32, + .len = 128, + }, + { + .key = "\x36\x45\x11\xa2\x98\x5f\x96\x7c" + "\xc6\xb4\x94\x31\x0a\x67\x09\x32" + "\x6c\x6f\x6f\x00\xf0\x17\xcb\xac" + "\xa5\xa9\x47\x9e\x2e\x85\x2f\xfa", + .iv = "\x28\x88\xaa\x9b\x59\x3b\x1e\x97" + "\x82\xe5\x5c\x9e\x6d\x14\x11\x19" + "\x6e\x38\x8f\xd5\x40\x2b\xca\xf9" + "\x7b\x4c\xe4\xa3\xd0\xd2\x8a\x13", + .ptext = "\x95\xd2\xf7\x71\x1b\xca\xa5\x86" + "\xd9\x48\x01\x93\x2f\x79\x55\x29" + "\x71\x13\x15\x0e\xe6\x12\xbc\x4d" + "\x8a\x31\xe3\x40\x2a\xc6\x5e\x0d" + "\x68\xbb\x4a\x62\x8d\xc7\x45\x77" + "\xd2\xb8\xc7\x1d\xf1\xd2\x5d\x97" + "\xcf\xac\x52\xe5\x32\x77\xb6\xda" + "\x30\x85\xcf\x2b\x98\xe9\xaa\x34" + "\x62\xb5\x23\x9e\xb7\xa6\xd4\xe0" + "\xb4\x58\x18\x8c\x4d\xde\x4d\x01" + "\x83\x89\x24\xca\xfb\x11\xd4\x82" + "\x30\x7a\x81\x35\xa0\xb4\xd4\xb6" + "\x84\xea\x47\x91\x8c\x19\x86\x25" + "\xa6\x06\x8d\x78\xe6\xed\x87\xeb" + "\xda\xea\x73\x7c\xbf\x66\xb8\x72" + "\xe3\x0a\xb8\x0c\xcb\x1a\x73\xf1" + "\xa7\xca\x0a\xde\x57\x2b\xbd\x2b" + "\xeb\x8b\x24\x38\x22\xd3\x0e\x1f" + "\x17\xa0\x84\x98\x31\x77\xfd\x34" + 
"\x6a\x4e\x3d\x84\x4c\x0e\xfb\xed" + "\xc8\x2a\x51\xfa\xd8\x73\x21\x8a" + "\xdb\xb5\xfe\x1f\xee\xc4\xe8\x65" + "\x54\x84\xdd\x96\x6d\xfd\xd3\x31" + "\x77\x36\x52\x6b\x80\x4f\x9e\xb4" + "\xa2\x55\xbf\x66\x41\x49\x4e\x87" + "\xa7\x0c\xca\xe7\xa5\xc5\xf6\x6f" + "\x27\x56\xe2\x48\x22\xdd\x5f\x59" + "\x3c\xf1\x9f\x83\xe5\x2d\xfb\x71" + "\xad\xd1\xae\x1b\x20\x5c\x47\xb7" + "\x3b\xd3\x14\xce\x81\x42\xb1\x0a" + "\xf0\x49\xfa\xc2\xe7\x86\xbf\xcd" + "\xb0\x95\x9f\x8f\x79\x41\x54", + .ctext = "\xf6\x57\x51\xc4\x25\x61\x2d\xfa" + "\xd6\xd9\x3f\x9a\x81\x51\xdd\x8e" + "\x3d\xe7\xaa\x2d\xb1\xda\xc8\xa6" + "\x9d\xaa\x3c\xab\x62\xf2\x80\xc3" + "\x2c\xe7\x58\x72\x1d\x44\xc5\x28" + "\x7f\xb4\xf9\xbc\x9c\xb2\xab\x8e" + "\xfa\xd1\x4d\x72\xd9\x79\xf5\xa0" + "\x24\x3e\x90\x25\x31\x14\x38\x45" + "\x59\xc8\xf6\xe2\xc6\xf6\xc1\xa7" + "\xb2\xf8\xa7\xa9\x2b\x6f\x12\x3a" + "\xb0\x81\xa4\x08\x57\x59\xb1\x56" + "\x4c\x8f\x18\x55\x33\x5f\xd6\x6a" + "\xc6\xa0\x4b\xd6\x6b\x64\x3e\x9e" + "\xfd\x66\x16\xe2\xdb\xeb\x5f\xb3" + "\x50\x50\x3e\xde\x8d\x72\x76\x01" + "\xbe\xcc\xc9\x52\x09\x2d\x8d\xe7" + "\xd6\xc3\x66\xdb\x36\x08\xd1\x77" + "\xc8\x73\x46\x26\x24\x29\xbf\x68" + "\x2d\x2a\x99\x43\x56\x55\xe4\x93" + "\xaf\xae\x4d\xe7\x55\x4a\xc0\x45" + "\x26\xeb\x3b\x12\x90\x7c\xdc\xd1" + "\xd5\x6f\x0a\xd0\xa9\xd7\x4b\x89" + "\x0b\x07\xd8\x86\xad\xa1\xc4\x69" + "\x1f\x5e\x8b\xc4\x9e\x91\x41\x25" + "\x56\x98\x69\x78\x3a\x9e\xae\x91" + "\xd8\xd9\xfa\xfb\xff\x81\x25\x09" + "\xfc\xed\x2d\x87\xbc\x04\x62\x97" + "\x35\xe1\x26\xc2\x46\x1c\xcf\xd7" + "\x14\xed\x02\x09\xa5\xb2\xb6\xaa" + "\x27\x4e\x61\xb3\x71\x6b\x47\x16" + "\xb7\xe8\xd4\xaf\x52\xeb\x6a\x6b" + "\xdb\x4c\x65\x21\x9e\x1c\x36", + .klen = 32, + .len = 255, + }, + { + .key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a" + "\x25\x74\x29\x0d\x51\x8a\x0e\x13" + "\xc1\x53\x5d\x30\x8d\xee\x75\x0d" + "\x14\xd6\x69\xc9\x15\xa9\x0c\x60", + .iv = "\x65\x9b\xd4\xa8\x7d\x29\x1d\xf4" + "\xc4\xd6\x9b\x6a\x28\xab\x64\xe2" + "\x62\x81\x97\xc5\x81\xaa\xf9\x44" + "\xc1\x72\x59\x82\xaf\x16\xc8\x2c", + .ptext = "\xc7\x6b\x52\x6a\x10\xf0\xcc\x09" + "\xc1\x12\x1d\x6d\x21\xa6\x78\xf5" + "\x05\xa3\x69\x60\x91\x36\x98\x57" + "\xba\x0c\x14\xcc\xf3\x2d\x73\x03" + "\xc6\xb2\x5f\xc8\x16\x27\x37\x5d" + "\xd0\x0b\x87\xb2\x50\x94\x7b\x58" + "\x04\xf4\xe0\x7f\x6e\x57\x8e\xc9" + "\x41\x84\xc1\xb1\x7e\x4b\x91\x12" + "\x3a\x8b\x5d\x50\x82\x7b\xcb\xd9" + "\x9a\xd9\x4e\x18\x06\x23\x9e\xd4" + "\xa5\x20\x98\xef\xb5\xda\xe5\xc0" + "\x8a\x6a\x83\x77\x15\x84\x1e\xae" + "\x78\x94\x9d\xdf\xb7\xd1\xea\x67" + "\xaa\xb0\x14\x15\xfa\x67\x21\x84" + "\xd3\x41\x2a\xce\xba\x4b\x4a\xe8" + "\x95\x62\xa9\x55\xf0\x80\xad\xbd" + "\xab\xaf\xdd\x4f\xa5\x7c\x13\x36" + "\xed\x5e\x4f\x72\xad\x4b\xf1\xd0" + "\x88\x4e\xec\x2c\x88\x10\x5e\xea" + "\x12\xc0\x16\x01\x29\xa3\xa0\x55" + "\xaa\x68\xf3\xe9\x9d\x3b\x0d\x3b" + "\x6d\xec\xf8\xa0\x2d\xf0\x90\x8d" + "\x1c\xe2\x88\xd4\x24\x71\xf9\xb3" + "\xc1\x9f\xc5\xd6\x76\x70\xc5\x2e" + "\x9c\xac\xdb\x90\xbd\x83\x72\xba" + "\x6e\xb5\xa5\x53\x83\xa9\xa5\xbf" + "\x7d\x06\x0e\x3c\x2a\xd2\x04\xb5" + "\x1e\x19\x38\x09\x16\xd2\x82\x1f" + "\x75\x18\x56\xb8\x96\x0b\xa6\xf9" + "\xcf\x62\xd9\x32\x5d\xa9\xd7\x1d" + "\xec\xe4\xdf\x1b\xbe\xf1\x36\xee" + "\xe3\x7b\xb5\x2f\xee\xf8\x53\x3d" + "\x6a\xb7\x70\xa9\xfc\x9c\x57\x25" + "\xf2\x89\x10\xd3\xb8\xa8\x8c\x30" + "\xae\x23\x4f\x0e\x13\x66\x4f\xe1" + "\xb6\xc0\xe4\xf8\xef\x93\xbd\x6e" + "\x15\x85\x6b\xe3\x60\x81\x1d\x68" + "\xd7\x31\x87\x89\x09\xab\xd5\x96" + "\x1d\xf3\x6d\x67\x80\xca\x07\x31" + "\x5d\xa7\xe4\xfb\x3e\xf2\x9b\x33" + "\x52\x18\xc8\x30\xfe\x2d\xca\x1e" + 
"\x79\x92\x7a\x60\x5c\xb6\x58\x87" + "\xa4\x36\xa2\x67\x92\x8b\xa4\xb7" + "\xf1\x86\xdf\xdc\xc0\x7e\x8f\x63" + "\xd2\xa2\xdc\x78\xeb\x4f\xd8\x96" + "\x47\xca\xb8\x91\xf9\xf7\x94\x21" + "\x5f\x9a\x9f\x5b\xb8\x40\x41\x4b" + "\x66\x69\x6a\x72\xd0\xcb\x70\xb7" + "\x93\xb5\x37\x96\x05\x37\x4f\xe5" + "\x8c\xa7\x5a\x4e\x8b\xb7\x84\xea" + "\xc7\xfc\x19\x6e\x1f\x5a\xa1\xac" + "\x18\x7d\x52\x3b\xb3\x34\x62\x99" + "\xe4\x9e\x31\x04\x3f\xc0\x8d\x84" + "\x17\x7c\x25\x48\x52\x67\x11\x27" + "\x67\xbb\x5a\x85\xca\x56\xb2\x5c" + "\xe6\xec\xd5\x96\x3d\x15\xfc\xfb" + "\x22\x25\xf4\x13\xe5\x93\x4b\x9a" + "\x77\xf1\x52\x18\xfa\x16\x5e\x49" + "\x03\x45\xa8\x08\xfa\xb3\x41\x92" + "\x79\x50\x33\xca\xd0\xd7\x42\x55" + "\xc3\x9a\x0c\x4e\xd9\xa4\x3c\x86" + "\x80\x9f\x53\xd1\xa4\x2e\xd1\xbc" + "\xf1\x54\x6e\x93\xa4\x65\x99\x8e" + "\xdf\x29\xc0\x64\x63\x07\xbb\xea", + .ctext = "\x9f\x72\x87\xc7\x17\xfb\x20\x15" + "\x65\xb3\x55\xa8\x1c\x8e\x52\x32" + "\xb1\x82\x8d\xbf\xb5\x9f\x10\x0a" + "\xe8\x0c\x70\x62\xef\x89\xb6\x1f" + "\x73\xcc\xe4\xcc\x7a\x3a\x75\x4a" + "\x26\xe7\xf5\xd7\x7b\x17\x39\x2d" + "\xd2\x27\x6e\xf9\x2f\x9e\xe2\xf6" + "\xfa\x16\xc2\xf2\x49\x26\xa7\x5b" + "\xe7\xca\x25\x0e\x45\xa0\x34\xc2" + "\x9a\x37\x79\x7e\x7c\x58\x18\x94" + "\x10\xa8\x7c\x48\xa9\xd7\x63\x89" + "\x9e\x61\x4d\x26\x34\xd9\xf0\xb1" + "\x2d\x17\x2c\x6f\x7c\x35\x0e\xbe" + "\x77\x71\x7c\x17\x5b\xab\x70\xdb" + "\x2f\x54\x0f\xa9\xc8\xf4\xf5\xab" + "\x52\x04\x3a\xb8\x03\xa7\xfd\x57" + "\x45\x5e\xbc\x77\xe1\xee\x79\x8c" + "\x58\x7b\x1f\xf7\x75\xde\x68\x17" + "\x98\x85\x8a\x18\x5c\xd2\x39\x78" + "\x7a\x6f\x26\x6e\xe1\x13\x91\xdd" + "\xdf\x0e\x6e\x67\xcc\x51\x53\xd8" + "\x17\x5e\xce\xa7\xe4\xaf\xfa\xf3" + "\x4f\x9f\x01\x9b\x04\xe7\xfc\xf9" + "\x6a\xdc\x1d\x0c\x9a\xaa\x3a\x7a" + "\x73\x03\xdf\xbf\x3b\x82\xbe\xb0" + "\xb4\xa4\xcf\x07\xd7\xde\x71\x25" + "\xc5\x10\xee\x0a\x15\x96\x8b\x4f" + "\xfe\xb8\x28\xbd\x4a\xcd\xeb\x9f" + "\x5d\x00\xc1\xee\xe8\x16\x44\xec" + "\xe9\x7b\xd6\x85\x17\x29\xcf\x58" + "\x20\xab\xf7\xce\x6b\xe7\x71\x7d" + "\x4f\xa8\xb0\xe9\x7d\x70\xd6\x0b" + "\x2e\x20\xb1\x1a\x63\x37\xaa\x2c" + "\x94\xee\xd5\xf6\x58\x2a\xf4\x7a" + "\x4c\xba\xf5\xe9\x3c\x6f\x95\x13" + "\x5f\x96\x81\x5b\xb5\x62\xf2\xd7" + "\x8d\xbe\xa1\x31\x51\xe6\xfe\xc9" + "\x07\x7d\x0f\x00\x3a\x66\x8c\x4b" + "\x94\xaa\xe5\x56\xde\xcd\x74\xa7" + "\x48\x67\x6f\xed\xc9\x6a\xef\xaf" + "\x9a\xb7\xae\x60\xfa\xc0\x37\x39" + "\xa5\x25\xe5\x22\xea\x82\x55\x68" + "\x3e\x30\xc3\x5a\xb6\x29\x73\x7a" + "\xb6\xfb\x34\xee\x51\x7c\x54\xe5" + "\x01\x4d\x72\x25\x32\x4a\xa3\x68" + "\x80\x9a\x89\xc5\x11\x66\x4c\x8c" + "\x44\x50\xbe\xd7\xa0\xee\xa6\xbb" + "\x92\x0c\xe6\xd7\x83\x51\xb1\x69" + "\x63\x40\xf3\xf4\x92\x84\xc4\x38" + "\x29\xfb\xb4\x84\xa0\x19\x75\x16" + "\x60\xbf\x0a\x9c\x89\xee\xad\xb4" + "\x43\xf9\x71\x39\x45\x7c\x24\x83" + "\x30\xbb\xee\x28\xb0\x86\x7b\xec" + "\x93\xc1\xbf\xb9\x97\x1b\x96\xef" + "\xee\x58\x35\x61\x12\x19\xda\x25" + "\x77\xe5\x80\x1a\x31\x27\x9b\xe4" + "\xda\x8b\x7e\x51\x4d\xcb\x01\x19" + "\x4f\xdc\x92\x1a\x17\xd5\x6b\xf4" + "\x50\xe3\x06\xe4\x76\x9f\x65\x00" + "\xbd\x7a\xe2\x64\x26\xf2\xe4\x7e" + "\x40\xf2\x80\xab\x62\xd5\xef\x23" + "\x8b\xfb\x6f\x24\x6e\x9b\x66\x0e" + "\xf4\x1c\x24\x1e\x1d\x26\x95\x09" + "\x94\x3c\xb2\xb6\x02\xa7\xd9\x9a", + .klen = 32, + .len = 512, + }, -static const struct hash_testvec blakes2s_256_tv_template[] = {{ - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21, - 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67, - 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04, - 
0xc0, 0x23, 0x23, 0xfd, 0x45, 0xc0, 0xa5, 0x2d, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .digest = (u8[]){ 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b, - 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b, - 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 0x1a, - 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x22, 0x27, 0xae, 0xaa, 0x6e, 0x81, 0x56, 0x03, - 0xa7, 0xe3, 0xa1, 0x18, 0xa5, 0x9a, 0x2c, 0x18, - 0xf4, 0x63, 0xbc, 0x16, 0x70, 0xf1, 0xe7, 0x4b, - 0x00, 0x6d, 0x66, 0x16, 0xae, 0x9e, 0x74, 0x4e, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0x58, 0x5d, 0xa8, 0x60, 0x1c, 0xa4, 0xd8, 0x03, - 0x86, 0x86, 0x84, 0x64, 0xd7, 0xa0, 0x8e, 0x15, - 0x2f, 0x05, 0xa2, 0x1b, 0xbc, 0xef, 0x7a, 0x34, - 0xb3, 0xc5, 0xbc, 0x4b, 0xf0, 0x32, 0xeb, 0x12, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66, - 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26, - 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab, - 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x2e, 0x74, 0x1c, 0x1d, 0x03, 0xf4, 0x9d, 0x84, - 0x6f, 0xfc, 0x86, 0x32, 0x92, 0x49, 0x7e, 0x66, - 0xd7, 0xc3, 0x10, 0x88, 0xfe, 0x28, 0xb3, 0xe0, - 0xbf, 0x50, 0x75, 0xad, 0x8e, 0xa4, 0xe6, 0xb2, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0xb9, 0xd2, 0x81, 0x0e, 0x3a, 0xb1, 0x62, 0x9b, - 0xad, 0x44, 0x05, 0xf4, 0x92, 0x2e, 0x99, 0xc1, - 0x4a, 0x47, 0xbb, 0x5b, 0x6f, 0xb2, 0x96, 0xed, - 0xd5, 0x06, 0xb5, 0x3a, 0x7c, 0x7a, 0x65, 0x1d, }, -}}; +}; + +#ifdef __LITTLE_ENDIAN +#define AUTHENC_KEY_HEADER(enckeylen) \ + "\x08\x00\x01\x00" /* LE rtattr */ \ + enckeylen /* crypto_authenc_key_param */ +#else +#define AUTHENC_KEY_HEADER(enckeylen) \ + "\x00\x08\x00\x01" /* BE rtattr */ \ + enckeylen /* crypto_authenc_key_param */ +#endif + +static const struct aead_testvec krb5_test_aes128_cts_hmac_sha256_128[] = { + /* rfc8009 Appendix A */ + { + /* "enc no plain" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x7E\x58\x95\xEA\xF2\x67\x24\x35\xBA\xD8\x17\xF5\x45\xA3\x71\x48" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\xEF\x85\xFB\x89\x0B\xB8\x47\x2F\x4D\xAB\x20\x39\x4D\xCA\x78\x1D" + "\xAD\x87\x7E\xDA\x39\xD5\x0C\x87\x0C\x0D\x5A\x0A\x8E\x48\xC7\x18", + .clen = 16 + 0 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain<block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x7B\xCA\x28\x5E\x2F\xD4\x13\x0F\xB5\x5B\x1A\x5C\x83\xBC\x5B\x24" // Confounder + "\x00\x01\x02\x03\x04\x05", // Plain + .plen = 16 + 6, + .ctext = + "\x84\xD7\xF3\x07\x54\xED\x98\x7B\xAB\x0B\xF3\x50\x6B\xEB\x09\xCF" + 
"\xB5\x54\x02\xCE\xF7\xE6\x87\x7C\xE9\x9E\x24\x7E\x52\xD1\x6E\xD4" + "\x42\x1D\xFD\xF8\x97\x6C", + .clen = 16 + 6 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain==block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x56\xAB\x21\x71\x3F\xF6\x2C\x0A\x14\x57\x20\x0F\x6F\xA9\x94\x8F" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain + .plen = 16 + 16, + .ctext = + "\x35\x17\xD6\x40\xF5\x0D\xDC\x8A\xD3\x62\x87\x22\xB3\x56\x9D\x2A" + "\xE0\x74\x93\xFA\x82\x63\x25\x40\x80\xEA\x65\xC1\x00\x8E\x8F\xC2" + "\x95\xFB\x48\x52\xE7\xD8\x3E\x1E\x7C\x48\xC3\x7E\xEB\xE6\xB0\xD3", + .clen = 16 + 16 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain>block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\xA7\xA4\xE2\x9A\x47\x28\xCE\x10\x66\x4F\xB6\x4E\x49\xAD\x3F\xAC" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x10\x11\x12\x13\x14", // Plain + .plen = 16 + 21, + .ctext = + "\x72\x0F\x73\xB1\x8D\x98\x59\xCD\x6C\xCB\x43\x46\x11\x5C\xD3\x36" + "\xC7\x0F\x58\xED\xC0\xC4\x43\x7C\x55\x73\x54\x4C\x31\xC8\x13\xBC" + "\xE1\xE6\xD0\x72\xC1\x86\xB3\x9A\x41\x3C\x2F\x92\xCA\x9B\x83\x34" + "\xA2\x87\xFF\xCB\xFC", + .clen = 16 + 21 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, +}; + +static const struct aead_testvec krb5_test_aes256_cts_hmac_sha384_192[] = { + /* rfc8009 Appendix A */ + { + /* "enc no plain" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\xF7\x64\xE9\xFA\x15\xC2\x76\x47\x8B\x2C\x7D\x0C\x4E\x5F\x58\xE4" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\x41\xF5\x3F\xA5\xBF\xE7\x02\x6D\x91\xFA\xF9\xBE\x95\x91\x95\xA0" + "\x58\x70\x72\x73\xA9\x6A\x40\xF0\xA0\x19\x60\x62\x1A\xC6\x12\x74" + "\x8B\x9B\xBF\xBE\x7E\xB4\xCE\x3C", + .clen = 16 + 0 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain<block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\xB8\x0D\x32\x51\xC1\xF6\x47\x14\x94\x25\x6F\xFE\x71\x2D\x0B\x9A" // Confounder + "\x00\x01\x02\x03\x04\x05", // Plain + .plen = 16 + 6, + .ctext = + "\x4E\xD7\xB3\x7C\x2B\xCA\xC8\xF7\x4F\x23\xC1\xCF\x07\xE6\x2B\xC7" + "\xB7\x5F\xB3\xF6\x37\xB9\xF5\x59\xC7\xF6\x64\xF6\x9E\xAB\x7B\x60" + "\x92\x23\x75\x26\xEA\x0D\x1F\x61\xCB\x20\xD6\x9D\x10\xF2", + .clen = 16 + 6 + 24, + .assoc = 
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain==block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\x53\xBF\x8A\x0D\x10\x52\x65\xD4\xE2\x76\x42\x86\x24\xCE\x5E\x63" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain + .plen = 16 + 16, + .ctext = + "\xBC\x47\xFF\xEC\x79\x98\xEB\x91\xE8\x11\x5C\xF8\xD1\x9D\xAC\x4B" + "\xBB\xE2\xE1\x63\xE8\x7D\xD3\x7F\x49\xBE\xCA\x92\x02\x77\x64\xF6" + "\x8C\xF5\x1F\x14\xD7\x98\xC2\x27\x3F\x35\xDF\x57\x4D\x1F\x93\x2E" + "\x40\xC4\xFF\x25\x5B\x36\xA2\x66", + .clen = 16 + 16 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain>block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\x76\x3E\x65\x36\x7E\x86\x4F\x02\xF5\x51\x53\xC7\xE3\xB5\x8A\xF1" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x10\x11\x12\x13\x14", // Plain + .plen = 16 + 21, + .ctext = + "\x40\x01\x3E\x2D\xF5\x8E\x87\x51\x95\x7D\x28\x78\xBC\xD2\xD6\xFE" + "\x10\x1C\xCF\xD5\x56\xCB\x1E\xAE\x79\xDB\x3C\x3E\xE8\x64\x29\xF2" + "\xB2\xA6\x02\xAC\x86\xFE\xF6\xEC\xB6\x47\xD6\x29\x5F\xAE\x07\x7A" + "\x1F\xEB\x51\x75\x08\xD2\xC1\x6B\x41\x92\xE0\x1F\x62", + .clen = 16 + 21 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, +}; + +static const struct aead_testvec krb5_test_camellia_cts_cmac[] = { + /* rfc6803 sec 10 */ + { + // "enc no plain" + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x45\xeb\x66\xe2\xef\xa8\x77\x8f\x7d\xf1\x46\x54\x53\x05\x98\x06" // Ki + "\xe9\x9b\x82\xb3\x6c\x4a\xe8\xea\x19\xe9\x5d\xfa\x9e\xde\x88\x2c", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xB6\x98\x22\xA1\x9A\x6B\x09\xC0\xEB\xC8\x55\x7D\x1F\x1B\x6C\x0A" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\xC4\x66\xF1\x87\x10\x69\x92\x1E\xDB\x7C\x6F\xDE\x24\x4A\x52\xDB" + "\x0B\xA1\x0E\xDC\x19\x7B\xDB\x80\x06\x65\x8C\xA3\xCC\xCE\x6E\xB8", + .clen = 16 + 0 + 16, + }, { + // "enc 1 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x13\x5f\xe7\x11\x6f\x53\xc2\xaa\x36\x12\xb7\xea\xe0\xf2\x84\xaa" // Ki + "\xa7\xed\xcd\x53\x97\xea\x6d\x12\xb0\xaf\xf4\xcb\x8d\xaa\x57\xad", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\x6F\x2F\xC3\xC2\xA1\x66\xFD\x88\x98\x96\x7A\x83\xDE\x95\x96\xD9" // Confounder + "1", // Plain + .plen = 16 + 1, + .ctext = + "\x84\x2D\x21\xFD\x95\x03\x11\xC0\xDD\x46\x4A\x3F\x4B\xE8\xD6\xDA" + "\x88\xA5\x6D\x55\x9C\x9B\x47\xD3\xF9\xA8\x50\x67\xAF\x66\x15\x59" + "\xB8", + .clen = 16 + 1 + 16, + }, { + // "enc 9 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x10\x2c\x34\xd0\x75\x74\x9f\x77\x8a\x15\xca\xd1\xe9\x7d\xa9\x86" // Ki + "\xdd\xe4\x2e\xca\x7c\xd9\x86\x3f\xc3\xce\x89\xcb\xc9\x43\x62\xd7", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + 
"\xA5\xB4\xA7\x1E\x07\x7A\xEE\xF9\x3C\x87\x63\xC1\x8F\xDB\x1F\x10" // Confounder + "9 bytesss", // Plain + .plen = 16 + 9, + .ctext = + "\x61\x9F\xF0\x72\xE3\x62\x86\xFF\x0A\x28\xDE\xB3\xA3\x52\xEC\x0D" + "\x0E\xDF\x5C\x51\x60\xD6\x63\xC9\x01\x75\x8C\xCF\x9D\x1E\xD3\x3D" + "\x71\xDB\x8F\x23\xAA\xBF\x83\x48\xA0", + .clen = 16 + 9 + 16, + }, { + // "enc 13 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\xb8\xc4\x38\xcc\x1a\x00\x60\xfc\x91\x3a\x8e\x07\x16\x96\xbd\x08" // Ki + "\xc3\x11\x3a\x25\x85\x90\xb9\xae\xbf\x72\x1b\x1a\xf6\xb0\xcb\xf8", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\x19\xFE\xE4\x0D\x81\x0C\x52\x4B\x5B\x22\xF0\x18\x74\xC6\x93\xDA" // Confounder + "13 bytes byte", // Plain + .plen = 16 + 13, + .ctext = + "\xB8\xEC\xA3\x16\x7A\xE6\x31\x55\x12\xE5\x9F\x98\xA7\xC5\x00\x20" + "\x5E\x5F\x63\xFF\x3B\xB3\x89\xAF\x1C\x41\xA2\x1D\x64\x0D\x86\x15" + "\xC9\xED\x3F\xBE\xB0\x5A\xB6\xAC\xB6\x76\x89\xB5\xEA", + .clen = 16 + 13 + 16, + }, { + // "enc 30 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x18\xaf\x19\xb0\x23\x74\x44\xfd\x75\x04\xad\x7d\xbd\x48\xad\xd3" // Ki + "\x8b\x07\xee\xd3\x01\x49\x91\x6a\xa2\x0d\xb3\xf5\xce\xd8\xaf\xad", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xCA\x7A\x7A\xB4\xBE\x19\x2D\xAB\xD6\x03\x50\x6D\xB1\x9C\x39\xE2" // Confounder + "30 bytes bytes bytes bytes byt", // Plain + .plen = 16 + 30, + .ctext = + "\xA2\x6A\x39\x05\xA4\xFF\xD5\x81\x6B\x7B\x1E\x27\x38\x0D\x08\x09" + "\x0C\x8E\xC1\xF3\x04\x49\x6E\x1A\xBD\xCD\x2B\xDC\xD1\xDF\xFC\x66" + "\x09\x89\xE1\x17\xA7\x13\xDD\xBB\x57\xA4\x14\x6C\x15\x87\xCB\xA4" + "\x35\x66\x65\x59\x1D\x22\x40\x28\x2F\x58\x42\xB1\x05\xA5", + .clen = 16 + 30 + 16, + }, { + // "enc no plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\xa2\xb8\x33\xe9\x43\xbb\x10\xee\x53\xb4\xa1\x9b\xc2\xbb\xc7\xe1" + "\x9b\x87\xad\x5d\xe9\x21\x22\xa4\x33\x8b\xe6\xf7\x32\xfd\x8a\x0e" // Ki + "\x6c\xcb\x3f\x25\xd8\xae\x57\xf4\xe8\xf6\xca\x47\x4b\xdd\xef\xf1" + "\x16\xce\x13\x1b\x3f\x71\x01\x2e\x75\x6d\x6b\x1e\x3f\x70\xa7\xf1", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\x3C\xBB\xD2\xB4\x59\x17\x94\x10\x67\xF9\x65\x99\xBB\x98\x92\x6C" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\x03\x88\x6D\x03\x31\x0B\x47\xA6\xD8\xF0\x6D\x7B\x94\xD1\xDD\x83" + "\x7E\xCC\xE3\x15\xEF\x65\x2A\xFF\x62\x08\x59\xD9\x4A\x25\x92\x66", + .clen = 16 + 0 + 16, + }, { + // "enc 1 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x84\x61\x4b\xfa\x98\xf1\x74\x8a\xa4\xaf\x99\x2b\x8c\x26\x28\x0d" + "\xc8\x98\x73\x29\xdf\x77\x5c\x1d\xb0\x4a\x43\xf1\x21\xaa\x86\x65" // Ki + "\xe9\x31\x73\xaa\x01\xeb\x3c\x24\x62\x31\xda\xfc\x78\x02\xee\x32" + "\xaf\x24\x85\x1d\x8c\x73\x87\xd1\x8c\xb9\xb2\xc5\xb7\xf5\x70\xb8", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xDE\xF4\x87\xFC\xEB\xE6\xDE\x63\x46\xD4\xDA\x45\x21\xBB\xA2\xD2" // Confounder + "1", // Plain + .plen = 16 + 1, + .ctext = + "\x2C\x9C\x15\x70\x13\x3C\x99\xBF\x6A\x34\xBC\x1B\x02\x12\x00\x2F" + "\xD1\x94\x33\x87\x49\xDB\x41\x35\x49\x7A\x34\x7C\xFC\xD9\xD1\x8A" + "\x12", + .clen = 16 + 1 + 16, + }, { + // "enc 9 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x47\xb9\xf5\xba\xd7\x63\x00\x58\x2a\x54\x45\xfa\x0c\x1b\x29\xc3" + "\xaa\x83\xec\x63\xb9\x0b\x4a\xb0\x08\x48\xc1\x85\x67\x4f\x44\xa7" // Ki + "\xcd\xa2\xd3\x9a\x9b\x24\x3f\xfe\xb5\x6e\x8d\x5f\x4b\xd5\x28\x74" + "\x1e\xcb\x52\x0c\x62\x12\x3f\xb0\x40\xb8\x41\x8b\x15\xc7\xd7\x0c", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xAD\x4F\xF9\x04\xD3\x4E\x55\x53\x84\xB1\x41\x00\xFC\x46\x5F\x88" // 
Confounder + "9 bytesss", // Plain + .plen = 16 + 9, + .ctext = + "\x9C\x6D\xE7\x5F\x81\x2D\xE7\xED\x0D\x28\xB2\x96\x35\x57\xA1\x15" + "\x64\x09\x98\x27\x5B\x0A\xF5\x15\x27\x09\x91\x3F\xF5\x2A\x2A\x9C" + "\x8E\x63\xB8\x72\xF9\x2E\x64\xC8\x39", + .clen = 16 + 9 + 16, + }, { + // "enc 13 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x15\x2f\x8c\x9d\xc9\x85\x79\x6e\xb1\x94\xed\x14\xc5\x9e\xac\xdd" + "\x41\x8a\x33\x32\x36\xb7\x8f\xaf\xa7\xc7\x9b\x04\xe0\xac\xe7\xbf" // Ki + "\xcd\x8a\x10\xe2\x79\xda\xdd\xb6\x90\x1e\xc3\x0b\xdf\x98\x73\x25" + "\x0f\x6e\xfc\x6a\x77\x36\x7d\x74\xdc\x3e\xe7\xf7\x4b\xc7\x77\x4e", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xCF\x9B\xCA\x6D\xF1\x14\x4E\x0C\x0A\xF9\xB8\xF3\x4C\x90\xD5\x14" // Confounder + "13 bytes byte", + .plen = 16 + 13, + .ctext = + "\xEE\xEC\x85\xA9\x81\x3C\xDC\x53\x67\x72\xAB\x9B\x42\xDE\xFC\x57" + "\x06\xF7\x26\xE9\x75\xDD\xE0\x5A\x87\xEB\x54\x06\xEA\x32\x4C\xA1" + "\x85\xC9\x98\x6B\x42\xAA\xBE\x79\x4B\x84\x82\x1B\xEE", + .clen = 16 + 13 + 16, + }, { + // "enc 30 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x04\x8d\xeb\xf7\xb1\x2c\x09\x32\xe8\xb2\x96\x99\x6c\x23\xf8\xb7" + "\x9d\x59\xb9\x7e\xa1\x19\xfc\x0c\x15\x6b\xf7\x88\xdc\x8c\x85\xe8" // Ki + "\x1d\x51\x47\xf3\x4b\xb0\x01\xa0\x4a\x68\xa7\x13\x46\xe7\x65\x4e" + "\x02\x23\xa6\x0d\x90\xbc\x2b\x79\xb4\xd8\x79\x56\xd4\x7c\xd4\x2a", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\x64\x4D\xEF\x38\xDA\x35\x00\x72\x75\x87\x8D\x21\x68\x55\xE2\x28" // Confounder + "30 bytes bytes bytes bytes byt", // Plain + .plen = 16 + 30, + .ctext = + "\x0E\x44\x68\x09\x85\x85\x5F\x2D\x1F\x18\x12\x52\x9C\xA8\x3B\xFD" + "\x8E\x34\x9D\xE6\xFD\x9A\xDA\x0B\xAA\xA0\x48\xD6\x8E\x26\x5F\xEB" + "\xF3\x4A\xD1\x25\x5A\x34\x49\x99\xAD\x37\x14\x68\x87\xA6\xC6\x84" + "\x57\x31\xAC\x7F\x46\x37\x6A\x05\x04\xCD\x06\x57\x14\x74", + .clen = 16 + 30 + 16, + }, +}; #endif /* _CRYPTO_TESTMGR_H */ diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index d23fa531b91f..bf4f28742f77 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c @@ -25,9 +25,9 @@ * Third Edition. */ +#include <crypto/algapi.h> #include <crypto/twofish.h> #include <linux/bitops.h> -#include <linux/crypto.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> @@ -298,7 +298,7 @@ static const u32 mds[4][256] = { * multiplication is inefficient without hardware support. To multiply * faster, I make use of the fact x is a generator for the nonzero elements, * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for - * some n in 0..254. Note that that caret is exponentiation in GF(2^8), + * some n in 0..254. Note that caret is exponentiation in GF(2^8), * *not* polynomial notation. So if I want to compute pq where p and q are * in GF(2^8), I can just say: * 1. if p=0 or q=0 then pq=0 diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index 86b2f067a416..368018cfa9bf 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -24,13 +24,13 @@ * Third Edition. 
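As an aside to the twofish_common.c comment above: the log/exp trick it describes can be sketched standalone. The table builder below is illustrative only — it takes the reduction polynomial as a parameter because Twofish uses two field polynomials (0x169 for the MDS matrix, 0x14D for the RS code), and the comment's w(x) refers to the one in that file, so treat whichever constant you pass as an assumption to check against the source.

#include <stdint.h>

/* exp/log tables for GF(2^8) with x as generator:
 * pq = exp[(log p + log q) % 255] for nonzero p, q. */
static uint8_t gf_exp[255], gf_log[256];

static void gf_init(unsigned int poly)	/* poly includes the x^8 term */
{
	unsigned int v = 1;

	for (int n = 0; n < 255; n++) {
		gf_exp[n] = (uint8_t)v;
		gf_log[v] = (uint8_t)n;
		v <<= 1;		/* multiply by x */
		if (v & 0x100)
			v ^= poly;	/* reduce mod the field polynomial */
	}
}

static uint8_t gf_mul(uint8_t p, uint8_t q)
{
	if (!p || !q)		/* step 1 of the comment: zero absorbs */
		return 0;
	return gf_exp[(gf_log[p] + gf_log[q]) % 255];
}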
*/ -#include <asm/unaligned.h> +#include <linux/unaligned.h> +#include <crypto/algapi.h> #include <crypto/twofish.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <linux/bitops.h> /* Macros to compute the g() function in the encryption and decryption @@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(twofish_mod_init); +module_init(twofish_mod_init); module_exit(twofish_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/vmac.c b/crypto/vmac.c deleted file mode 100644 index 4633b2dda1e0..000000000000 --- a/crypto/vmac.c +++ /dev/null @@ -1,697 +0,0 @@ -/* - * VMAC: Message Authentication Code using Universal Hashing - * - * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01 - * - * Copyright (c) 2009, Intel Corporation. - * Copyright (c) 2018, Google Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - */ - -/* - * Derived from: - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. - * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Last modified: 17 APR 08, 1700 PDT - */ - -#include <asm/unaligned.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/crypto.h> -#include <linux/module.h> -#include <linux/scatterlist.h> -#include <asm/byteorder.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/hash.h> - -/* - * User definable settings. 
- */ -#define VMAC_TAG_LEN 64 -#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ -#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) -#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ -#define VMAC_NONCEBYTES 16 - -/* per-transform (per-key) context */ -struct vmac_tfm_ctx { - struct crypto_cipher *cipher; - u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; - u64 polykey[2*VMAC_TAG_LEN/64]; - u64 l3key[2*VMAC_TAG_LEN/64]; -}; - -/* per-request context */ -struct vmac_desc_ctx { - union { - u8 partial[VMAC_NHBYTES]; /* partial block */ - __le64 partial_words[VMAC_NHBYTES / 8]; - }; - unsigned int partial_size; /* size of the partial block */ - bool first_block_processed; - u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */ - union { - u8 bytes[VMAC_NONCEBYTES]; - __be64 pads[VMAC_NONCEBYTES / 8]; - } nonce; - unsigned int nonce_size; /* nonce bytes filled so far */ -}; - -/* - * Constants and masks - */ -#define UINT64_C(x) x##ULL -static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ -static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ -static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ -static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ -static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ - -#define pe64_to_cpup le64_to_cpup /* Prefer little endian */ - -#ifdef __LITTLE_ENDIAN -#define INDEX_HIGH 1 -#define INDEX_LOW 0 -#else -#define INDEX_HIGH 0 -#define INDEX_LOW 1 -#endif - -/* - * The following routines are used in this implementation. They are - * written via macros to simulate zero-overhead call-by-reference. - * - * MUL64: 64x64->128-bit multiplication - * PMUL64: assumes top bits cleared on inputs - * ADD128: 128x128->128-bit addition - */ - -#define ADD128(rh, rl, ih, il) \ - do { \ - u64 _il = (il); \ - (rl) += (_il); \ - if ((rl) < (_il)) \ - (rh)++; \ - (rh) += (ih); \ - } while (0) - -#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) - -#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ - do { \ - u64 _i1 = (i1), _i2 = (i2); \ - u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ - rh = MUL32(_i1>>32, _i2>>32); \ - rl = MUL32(_i1, _i2); \ - ADD128(rh, rl, (m >> 32), (m << 32)); \ - } while (0) - -#define MUL64(rh, rl, i1, i2) \ - do { \ - u64 _i1 = (i1), _i2 = (i2); \ - u64 m1 = MUL32(_i1, _i2>>32); \ - u64 m2 = MUL32(_i1>>32, _i2); \ - rh = MUL32(_i1>>32, _i2>>32); \ - rl = MUL32(_i1, _i2); \ - ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ - ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ - } while (0) - -/* - * For highest performance the L1 NH and L2 polynomial hashes should be - * carefully implemented to take advantage of one's target architecture. - * Here these two hash functions are defined multiple time; once for - * 64-bit architectures, once for 32-bit SSE2 architectures, and once - * for the rest (32-bit) architectures. - * For each, nh_16 *must* be defined (works on multiples of 16 bytes). - * Optionally, nh_vmac_nhbytes can be defined (for multiples of - * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two - * NH computations at once). 
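The ADD128/MUL64/PMUL64 macros above are the whole arithmetic core of VMAC, so a quick standalone cross-check can be useful. This sketch (not part of the patch; it assumes a compiler with the unsigned __int128 extension) re-derives MUL64 from four 32x32 partial products and verifies it against a native 128-bit multiply:

#include <assert.h>
#include <stdint.h>

/* Schoolbook 64x64 -> 128-bit multiply, mirroring MUL64/ADD128. */
static void mul64(uint64_t *rh, uint64_t *rl, uint64_t a, uint64_t b)
{
	uint64_t m1 = (uint64_t)(uint32_t)a * (uint32_t)(b >> 32);
	uint64_t m2 = (uint64_t)(uint32_t)(a >> 32) * (uint32_t)b;
	uint64_t lo = (uint64_t)(uint32_t)a * (uint32_t)b;
	uint64_t hi = (uint64_t)(uint32_t)(a >> 32) * (uint32_t)(b >> 32);

	lo += m1 << 32;			/* fold cross term 1, with carry */
	hi += (m1 >> 32) + (lo < (uint64_t)(m1 << 32));
	lo += m2 << 32;			/* fold cross term 2, with carry */
	hi += (m2 >> 32) + (lo < (uint64_t)(m2 << 32));
	*rh = hi;
	*rl = lo;
}

int main(void)
{
	uint64_t rh, rl;
	uint64_t a = 0x0123456789abcdefULL, b = 0xfedcba9876543210ULL;

	mul64(&rh, &rl, a, b);
	assert(rh == (uint64_t)(((unsigned __int128)a * b) >> 64));
	assert(rl == (uint64_t)((unsigned __int128)a * b));
	return 0;
}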
- */ - -#ifdef CONFIG_64BIT - -#define nh_16(mp, kp, nw, rh, rl) \ - do { \ - int i; u64 th, tl; \ - rh = rl = 0; \ - for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - } \ - } while (0) - -#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ - do { \ - int i; u64 th, tl; \ - rh1 = rl1 = rh = rl = 0; \ - for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ - ADD128(rh1, rl1, th, tl); \ - } \ - } while (0) - -#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ -#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ - do { \ - int i; u64 th, tl; \ - rh = rl = 0; \ - for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ - pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ - pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ - ADD128(rh, rl, th, tl); \ - } \ - } while (0) - -#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ - do { \ - int i; u64 th, tl; \ - rh1 = rl1 = rh = rl = 0; \ - for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ - ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ - pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ - ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ - pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ - pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ - ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ - pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ - pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ - ADD128(rh1, rl1, th, tl); \ - } \ - } while (0) -#endif - -#define poly_step(ah, al, kh, kl, mh, ml) \ - do { \ - u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ - /* compute ab*cd, put bd into result registers */ \ - PMUL64(t3h, t3l, al, kh); \ - PMUL64(t2h, t2l, ah, kl); \ - PMUL64(t1h, t1l, ah, 2*kh); \ - PMUL64(ah, al, al, kl); \ - /* add 2 * ac to result */ \ - ADD128(ah, al, t1h, t1l); \ - /* add together ad + bc */ \ - ADD128(t2h, t2l, t3h, t3l); \ - /* now (ah,al), (t2l,2*t2h) need summing */ \ - /* first add the high registers, carrying into t2h */ \ - ADD128(t2h, ah, z, t2l); \ - /* double t2h and add top bit of ah */ \ - t2h = 2 * t2h + (ah >> 63); \ - ah &= m63; \ - /* now add the low registers */ \ - ADD128(ah, al, mh, ml); \ - ADD128(ah, al, z, t2h); \ - } while (0) - -#else /* ! 
CONFIG_64BIT */ - -#ifndef nh_16 -#define nh_16(mp, kp, nw, rh, rl) \ - do { \ - u64 t1, t2, m1, m2, t; \ - int i; \ - rh = rl = t = 0; \ - for (i = 0; i < nw; i += 2) { \ - t1 = pe64_to_cpup(mp+i) + kp[i]; \ - t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ - m2 = MUL32(t1 >> 32, t2); \ - m1 = MUL32(t1, t2 >> 32); \ - ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ - MUL32(t1, t2)); \ - rh += (u64)(u32)(m1 >> 32) \ - + (u32)(m2 >> 32); \ - t += (u64)(u32)m1 + (u32)m2; \ - } \ - ADD128(rh, rl, (t >> 32), (t << 32)); \ - } while (0) -#endif - -static void poly_step_func(u64 *ahi, u64 *alo, - const u64 *kh, const u64 *kl, - const u64 *mh, const u64 *ml) -{ -#define a0 (*(((u32 *)alo)+INDEX_LOW)) -#define a1 (*(((u32 *)alo)+INDEX_HIGH)) -#define a2 (*(((u32 *)ahi)+INDEX_LOW)) -#define a3 (*(((u32 *)ahi)+INDEX_HIGH)) -#define k0 (*(((u32 *)kl)+INDEX_LOW)) -#define k1 (*(((u32 *)kl)+INDEX_HIGH)) -#define k2 (*(((u32 *)kh)+INDEX_LOW)) -#define k3 (*(((u32 *)kh)+INDEX_HIGH)) - - u64 p, q, t; - u32 t2; - - p = MUL32(a3, k3); - p += p; - p += *(u64 *)mh; - p += MUL32(a0, k2); - p += MUL32(a1, k1); - p += MUL32(a2, k0); - t = (u32)(p); - p >>= 32; - p += MUL32(a0, k3); - p += MUL32(a1, k2); - p += MUL32(a2, k1); - p += MUL32(a3, k0); - t |= ((u64)((u32)p & 0x7fffffff)) << 32; - p >>= 31; - p += (u64)(((u32 *)ml)[INDEX_LOW]); - p += MUL32(a0, k0); - q = MUL32(a1, k3); - q += MUL32(a2, k2); - q += MUL32(a3, k1); - q += q; - p += q; - t2 = (u32)(p); - p >>= 32; - p += (u64)(((u32 *)ml)[INDEX_HIGH]); - p += MUL32(a0, k1); - p += MUL32(a1, k0); - q = MUL32(a2, k3); - q += MUL32(a3, k2); - q += q; - p += q; - *(u64 *)(alo) = (p << 32) | t2; - p >>= 32; - *(u64 *)(ahi) = p + t; - -#undef a0 -#undef a1 -#undef a2 -#undef a3 -#undef k0 -#undef k1 -#undef k2 -#undef k3 -} - -#define poly_step(ah, al, kh, kl, mh, ml) \ - poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) - -#endif /* end of specialized NH and poly definitions */ - -/* At least nh_16 is defined. 
Defined others as needed here */ -#ifndef nh_16_2 -#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ - do { \ - nh_16(mp, kp, nw, rh, rl); \ - nh_16(mp, ((kp)+2), nw, rh2, rl2); \ - } while (0) -#endif -#ifndef nh_vmac_nhbytes -#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ - nh_16(mp, kp, nw, rh, rl) -#endif -#ifndef nh_vmac_nhbytes_2 -#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ - do { \ - nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ - nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ - } while (0) -#endif - -static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) -{ - u64 rh, rl, t, z = 0; - - /* fully reduce (p1,p2)+(len,0) mod p127 */ - t = p1 >> 63; - p1 &= m63; - ADD128(p1, p2, len, t); - /* At this point, (p1,p2) is at most 2^127+(len<<64) */ - t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); - ADD128(p1, p2, z, t); - p1 &= m63; - - /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ - t = p1 + (p2 >> 32); - t += (t >> 32); - t += (u32)t > 0xfffffffeu; - p1 += (t >> 32); - p2 += (p1 << 32); - - /* compute (p1+k1)%p64 and (p2+k2)%p64 */ - p1 += k1; - p1 += (0 - (p1 < k1)) & 257; - p2 += k2; - p2 += (0 - (p2 < k2)) & 257; - - /* compute (p1+k1)*(p2+k2)%p64 */ - MUL64(rh, rl, p1, p2); - t = rh >> 56; - ADD128(t, rl, z, rh); - rh <<= 8; - ADD128(t, rl, z, rh); - t += t << 8; - rl += t; - rl += (0 - (rl < t)) & 257; - rl += (0 - (rl > p64-1)) & 257; - return rl; -} - -/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */ -static void vhash_blocks(const struct vmac_tfm_ctx *tctx, - struct vmac_desc_ctx *dctx, - const __le64 *mptr, unsigned int blocks) -{ - const u64 *kptr = tctx->nhkey; - const u64 pkh = tctx->polykey[0]; - const u64 pkl = tctx->polykey[1]; - u64 ch = dctx->polytmp[0]; - u64 cl = dctx->polytmp[1]; - u64 rh, rl; - - if (!dctx->first_block_processed) { - dctx->first_block_processed = true; - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); - rh &= m62; - ADD128(ch, cl, rh, rl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - blocks--; - } - - while (blocks--) { - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); - rh &= m62; - poly_step(ch, cl, pkh, pkl, rh, rl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - } - - dctx->polytmp[0] = ch; - dctx->polytmp[1] = cl; -} - -static int vmac_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm); - __be64 out[2]; - u8 in[16] = { 0 }; - unsigned int i; - int err; - - if (keylen != VMAC_KEY_LEN) - return -EINVAL; - - err = crypto_cipher_setkey(tctx->cipher, key, keylen); - if (err) - return err; - - /* Fill nh key */ - in[0] = 0x80; - for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) { - crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); - tctx->nhkey[i] = be64_to_cpu(out[0]); - tctx->nhkey[i+1] = be64_to_cpu(out[1]); - in[15]++; - } - - /* Fill poly key */ - in[0] = 0xC0; - in[15] = 0; - for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) { - crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); - tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly; - tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly; - in[15]++; - } - - /* Fill ip key */ - in[0] = 0xE0; - in[15] = 0; - for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) { - do { - crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); - tctx->l3key[i] = be64_to_cpu(out[0]); - tctx->l3key[i+1] = be64_to_cpu(out[1]); - in[15]++; - } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64); - } - - return 0; -} - -static int vmac_init(struct shash_desc *desc) -{ - const struct vmac_tfm_ctx *tctx = 
crypto_shash_ctx(desc->tfm); - struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->partial_size = 0; - dctx->first_block_processed = false; - memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp)); - dctx->nonce_size = 0; - return 0; -} - -static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len) -{ - const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int n; - - /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */ - if (dctx->nonce_size < VMAC_NONCEBYTES) { - n = min(len, VMAC_NONCEBYTES - dctx->nonce_size); - memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n); - dctx->nonce_size += n; - p += n; - len -= n; - } - - if (dctx->partial_size) { - n = min(len, VMAC_NHBYTES - dctx->partial_size); - memcpy(&dctx->partial[dctx->partial_size], p, n); - dctx->partial_size += n; - p += n; - len -= n; - if (dctx->partial_size == VMAC_NHBYTES) { - vhash_blocks(tctx, dctx, dctx->partial_words, 1); - dctx->partial_size = 0; - } - } - - if (len >= VMAC_NHBYTES) { - n = round_down(len, VMAC_NHBYTES); - /* TODO: 'p' may be misaligned here */ - vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES); - p += n; - len -= n; - } - - if (len) { - memcpy(dctx->partial, p, len); - dctx->partial_size = len; - } - - return 0; -} - -static u64 vhash_final(const struct vmac_tfm_ctx *tctx, - struct vmac_desc_ctx *dctx) -{ - unsigned int partial = dctx->partial_size; - u64 ch = dctx->polytmp[0]; - u64 cl = dctx->polytmp[1]; - - /* L1 and L2-hash the final block if needed */ - if (partial) { - /* Zero-pad to next 128-bit boundary */ - unsigned int n = round_up(partial, 16); - u64 rh, rl; - - memset(&dctx->partial[partial], 0, n - partial); - nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl); - rh &= m62; - if (dctx->first_block_processed) - poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1], - rh, rl); - else - ADD128(ch, cl, rh, rl); - } - - /* L3-hash the 128-bit output of L2-hash */ - return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8); -} - -static int vmac_final(struct shash_desc *desc, u8 *out) -{ - const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - int index; - u64 hash, pad; - - if (dctx->nonce_size != VMAC_NONCEBYTES) - return -EINVAL; - - /* - * The VMAC specification requires a nonce at least 1 bit shorter than - * the block cipher's block length, so we actually only accept a 127-bit - * nonce. We define the unused bit to be the first one and require that - * it be 0, so the needed prepending of a 0 bit is implicit. 
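For orientation, the finalization below (vmac_final()) combines VHASH with a pad derived from the nonce: the nonce's last bit is cleared before encryption and then used to select which 64-bit half of the ciphertext becomes the pad. A freestanding sketch of just that step — E() here is a toy stand-in for the real block cipher, so this is an illustration rather than the kernel code:

#include <stdint.h>
#include <string.h>

/* Toy stand-in for the underlying 128-bit block cipher; NOT secure. */
static void E(uint8_t out[16], const uint8_t in[16])
{
	for (int i = 0; i < 16; i++)
		out[i] = (uint8_t)((in[i] ^ 0xA5) + i);
}

static uint64_t be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* tag = VHASH(message) + one half of E(nonce with last bit cleared),
 * where the cleared bit selects the half; addition is mod 2^64. */
static uint64_t vmac64_tag(uint64_t vhash, const uint8_t nonce[16])
{
	uint8_t buf[16], ct[16];
	int index = nonce[15] & 1;	/* last bit picks the 64-bit half */

	memcpy(buf, nonce, 16);
	buf[15] &= ~1;			/* encrypt with the bit cleared */
	E(ct, buf);
	return vhash + be64(ct + 8 * index);
}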
- */ - if (dctx->nonce.bytes[0] & 0x80) - return -EINVAL; - - /* Finish calculating the VHASH of the message */ - hash = vhash_final(tctx, dctx); - - /* Generate pseudorandom pad by encrypting the nonce */ - BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8)); - index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1; - dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1; - crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes, - dctx->nonce.bytes); - pad = be64_to_cpu(dctx->nonce.pads[index]); - - /* The VMAC is the sum of VHASH and the pseudorandom pad */ - put_unaligned_be64(hash + pad, out); - return 0; -} - -static int vmac_init_tfm(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); - struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - tctx->cipher = cipher; - return 0; -} - -static void vmac_exit_tfm(struct crypto_tfm *tfm) -{ - struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - - crypto_free_cipher(tctx->cipher); -} - -static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct shash_instance *inst; - struct crypto_cipher_spawn *spawn; - struct crypto_alg *alg; - u32 mask; - int err; - - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); - if (err) - return err; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - spawn = shash_instance_ctx(inst); - - err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, mask); - if (err) - goto err_free_inst; - alg = crypto_spawn_cipher_alg(spawn); - - err = -EINVAL; - if (alg->cra_blocksize != VMAC_NONCEBYTES) - goto err_free_inst; - - err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); - if (err) - goto err_free_inst; - - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); - inst->alg.base.cra_init = vmac_init_tfm; - inst->alg.base.cra_exit = vmac_exit_tfm; - - inst->alg.descsize = sizeof(struct vmac_desc_ctx); - inst->alg.digestsize = VMAC_TAG_LEN / 8; - inst->alg.init = vmac_init; - inst->alg.update = vmac_update; - inst->alg.final = vmac_final; - inst->alg.setkey = vmac_setkey; - - inst->free = shash_free_singlespawn_instance; - - err = shash_register_instance(tmpl, inst); - if (err) { -err_free_inst: - shash_free_singlespawn_instance(inst); - } - return err; -} - -static struct crypto_template vmac64_tmpl = { - .name = "vmac64", - .create = vmac_create, - .module = THIS_MODULE, -}; - -static int __init vmac_module_init(void) -{ - return crypto_register_template(&vmac64_tmpl); -} - -static void __exit vmac_module_exit(void) -{ - crypto_unregister_template(&vmac64_tmpl); -} - -subsys_initcall(vmac_module_init); -module_exit(vmac_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("VMAC hash algorithm"); -MODULE_ALIAS_CRYPTO("vmac64"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/wp512.c b/crypto/wp512.c index bf79fbb2340f..229b189a7988 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -21,10 +21,10 @@ */ #include <crypto/internal/hash.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/mm.h> -#include <asm/byteorder.h> -#include <linux/types.h> +#include 
<linux/string.h> +#include <linux/unaligned.h> #define WP512_DIGEST_SIZE 64 #define WP384_DIGEST_SIZE 48 @@ -37,9 +37,6 @@ struct wp512_ctx { u8 bitLength[WP512_LENGTHBYTES]; - u8 buffer[WP512_BLOCK_SIZE]; - int bufferBits; - int bufferPos; u64 hash[WP512_DIGEST_SIZE/8]; }; @@ -775,20 +772,20 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = { 0xca2dbf07ad5a8333ULL, }; -/** +/* * The core Whirlpool transform. */ -static void wp512_process_buffer(struct wp512_ctx *wctx) { +static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx, + const u8 *buffer) { int i, r; u64 K[8]; /* the round key */ u64 block[8]; /* mu(buffer) */ u64 state[8]; /* the cipher state */ u64 L[8]; - const __be64 *buffer = (const __be64 *)wctx->buffer; for (i = 0; i < 8; i++) - block[i] = be64_to_cpu(buffer[i]); + block[i] = get_unaligned_be64(buffer + i * 8); state[0] = block[0] ^ (K[0] = wctx->hash[0]); state[1] = block[1] ^ (K[1] = wctx->hash[1]); @@ -991,8 +988,6 @@ static int wp512_init(struct shash_desc *desc) { int i; memset(wctx->bitLength, 0, 32); - wctx->bufferBits = wctx->bufferPos = 0; - wctx->buffer[0] = 0; for (i = 0; i < 8; i++) { wctx->hash[i] = 0L; } @@ -1000,84 +995,54 @@ static int wp512_init(struct shash_desc *desc) { return 0; } -static int wp512_update(struct shash_desc *desc, const u8 *source, - unsigned int len) +static void wp512_add_length(u8 *bitLength, u64 value) { - struct wp512_ctx *wctx = shash_desc_ctx(desc); - int sourcePos = 0; - unsigned int bits_len = len * 8; // convert to number of bits - int sourceGap = (8 - ((int)bits_len & 7)) & 7; - int bufferRem = wctx->bufferBits & 7; + u32 carry; int i; - u32 b, carry; - u8 *buffer = wctx->buffer; - u8 *bitLength = wctx->bitLength; - int bufferBits = wctx->bufferBits; - int bufferPos = wctx->bufferPos; - u64 value = bits_len; for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) { carry += bitLength[i] + ((u32)value & 0xff); bitLength[i] = (u8)carry; carry >>= 8; value >>= 8; } - while (bits_len > 8) { - b = ((source[sourcePos] << sourceGap) & 0xff) | - ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap)); - buffer[bufferPos++] |= (u8)(b >> bufferRem); - bufferBits += 8 - bufferRem; - if (bufferBits == WP512_BLOCK_SIZE * 8) { - wp512_process_buffer(wctx); - bufferBits = bufferPos = 0; - } - buffer[bufferPos] = b << (8 - bufferRem); - bufferBits += bufferRem; - bits_len -= 8; - sourcePos++; - } - if (bits_len > 0) { - b = (source[sourcePos] << sourceGap) & 0xff; - buffer[bufferPos] |= b >> bufferRem; - } else { - b = 0; - } - if (bufferRem + bits_len < 8) { - bufferBits += bits_len; - } else { - bufferPos++; - bufferBits += 8 - bufferRem; - bits_len -= 8 - bufferRem; - if (bufferBits == WP512_BLOCK_SIZE * 8) { - wp512_process_buffer(wctx); - bufferBits = bufferPos = 0; - } - buffer[bufferPos] = b << (8 - bufferRem); - bufferBits += (int)bits_len; - } +} + +static int wp512_update(struct shash_desc *desc, const u8 *source, + unsigned int len) +{ + struct wp512_ctx *wctx = shash_desc_ctx(desc); + unsigned int remain = len % WP512_BLOCK_SIZE; + u64 bits_len = (len - remain) * 8ull; + u8 *bitLength = wctx->bitLength; - wctx->bufferBits = bufferBits; - wctx->bufferPos = bufferPos; + wp512_add_length(bitLength, bits_len); + do { + wp512_process_buffer(wctx, source); + source += WP512_BLOCK_SIZE; + bits_len -= WP512_BLOCK_SIZE * 8; + } while (bits_len); - return 0; + return remain; } -static int wp512_final(struct shash_desc *desc, u8 *out) +static int wp512_finup(struct shash_desc *desc, const u8 *src, + unsigned int 
bufferPos, u8 *out) { struct wp512_ctx *wctx = shash_desc_ctx(desc); int i; - u8 *buffer = wctx->buffer; u8 *bitLength = wctx->bitLength; - int bufferBits = wctx->bufferBits; - int bufferPos = wctx->bufferPos; __be64 *digest = (__be64 *)out; + u8 buffer[WP512_BLOCK_SIZE]; - buffer[bufferPos] |= 0x80U >> (bufferBits & 7); + wp512_add_length(bitLength, bufferPos * 8); + memcpy(buffer, src, bufferPos); + buffer[bufferPos] = 0x80U; bufferPos++; if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) { if (bufferPos < WP512_BLOCK_SIZE) memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos); - wp512_process_buffer(wctx); + wp512_process_buffer(wctx, buffer); bufferPos = 0; } if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES) @@ -1086,31 +1051,32 @@ static int wp512_final(struct shash_desc *desc, u8 *out) bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES; memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], bitLength, WP512_LENGTHBYTES); - wp512_process_buffer(wctx); + wp512_process_buffer(wctx, buffer); + memzero_explicit(buffer, sizeof(buffer)); for (i = 0; i < WP512_DIGEST_SIZE/8; i++) digest[i] = cpu_to_be64(wctx->hash[i]); - wctx->bufferBits = bufferBits; - wctx->bufferPos = bufferPos; return 0; } -static int wp384_final(struct shash_desc *desc, u8 *out) +static int wp384_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { u8 D[64]; - wp512_final(desc, D); + wp512_finup(desc, src, len, D); memcpy(out, D, WP384_DIGEST_SIZE); memzero_explicit(D, WP512_DIGEST_SIZE); return 0; } -static int wp256_final(struct shash_desc *desc, u8 *out) +static int wp256_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { u8 D[64]; - wp512_final(desc, D); + wp512_finup(desc, src, len, D); memcpy(out, D, WP256_DIGEST_SIZE); memzero_explicit(D, WP512_DIGEST_SIZE); @@ -1121,11 +1087,12 @@ static struct shash_alg wp_algs[3] = { { .digestsize = WP512_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, - .final = wp512_final, + .finup = wp512_finup, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp512", .cra_driver_name = "wp512-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1133,11 +1100,12 @@ static struct shash_alg wp_algs[3] = { { .digestsize = WP384_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, - .final = wp384_final, + .finup = wp384_finup, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp384", .cra_driver_name = "wp384-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1145,11 +1113,12 @@ static struct shash_alg wp_algs[3] = { { .digestsize = WP256_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, - .final = wp256_final, + .finup = wp256_finup, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp256", .cra_driver_name = "wp256-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1169,7 +1138,7 @@ MODULE_ALIAS_CRYPTO("wp512"); MODULE_ALIAS_CRYPTO("wp384"); MODULE_ALIAS_CRYPTO("wp256"); -subsys_initcall(wp512_mod_init); +module_init(wp512_mod_init); module_exit(wp512_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 6074c5c1da49..6c5f6766fdd6 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -8,9 +8,12 @@ #include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> +#include <crypto/utils.h> #include <linux/err.h> #include <linux/kernel.h> 
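The Whirlpool conversion above — and the XCBC conversion this hunk begins — both move to the block-only hash model flagged by CRYPTO_AHASH_ALG_BLOCK_ONLY: update() consumes whole blocks and returns the count of trailing bytes it left alone, which the core buffers and later hands to finup() as (src, len). A minimal sketch of that contract with a toy compression function (hypothetical names, not the kernel API):

#include <stddef.h>

#define BLOCK_SIZE 64	/* e.g. WP512_BLOCK_SIZE */

struct toy_state { unsigned char acc[BLOCK_SIZE]; };

/* Toy compression step standing in for wp512_process_buffer(). */
static void process_block(struct toy_state *st, const unsigned char *blk)
{
	for (size_t i = 0; i < BLOCK_SIZE; i++)
		st->acc[i] ^= blk[i];
}

/* Block-only contract: consume whole blocks, return the leftover byte
 * count; the shash core buffers those bytes and passes them to finup(). */
static unsigned int blockonly_update(struct toy_state *st,
				     const unsigned char *src,
				     unsigned int len)
{
	unsigned int remain = len % BLOCK_SIZE;

	for (len -= remain; len; len -= BLOCK_SIZE, src += BLOCK_SIZE)
		process_block(st, src);
	return remain;
}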
#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, @@ -27,23 +30,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, */ struct xcbc_tfm_ctx { struct crypto_cipher *child; - u8 ctx[]; -}; - -/* - * +------------------------ - * | <shash desc> - * +------------------------ - * | xcbc_desc_ctx - * +------------------------ - * | odds (block size) - * +------------------------ - * | prev (block size) - * +------------------------ - */ -struct xcbc_desc_ctx { - unsigned int len; - u8 ctx[]; + u8 consts[]; }; #define XCBC_BLOCKSIZE 16 @@ -51,9 +38,8 @@ struct xcbc_desc_ctx { static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); - u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *consts = ctx->consts; int err = 0; u8 key1[XCBC_BLOCKSIZE]; int bs = sizeof(key1); @@ -71,14 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; + u8 *prev = shash_desc_ctx(pdesc); - ctx->len = 0; memset(prev, 0, bs); - return 0; } @@ -86,81 +68,37 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); - u8 *prev = odds + bs; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(odds + ctx->len, p, len); - ctx->len += len; - return 0; - } - - /* filling odds with new data and encrypting it */ - memcpy(odds + ctx->len, p, bs - ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - crypto_xor(prev, odds, bs); - crypto_cipher_encrypt_one(tfm, prev, prev); - - /* clearing the length */ - ctx->len = 0; + u8 *prev = shash_desc_ctx(pdesc); - /* encrypting the rest of data */ - while (len > bs) { + do { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(odds, p, len); - ctx->len = len; - } - - return 0; + } while (len >= bs); + return len; } -static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); - u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); - u8 *prev = odds + bs; + u8 *prev = shash_desc_ctx(pdesc); unsigned int offset = 0; - if (ctx->len != bs) { - unsigned int rlen; - u8 *p = odds + 
ctx->len; - - *p = 0x80; - p++; - - rlen = bs - ctx->len -1; - if (rlen) - memset(p, 0, rlen); - + crypto_xor(prev, src, len); + if (len != bs) { + prev[len] ^= 0x80; offset += bs; } - - crypto_xor(prev, odds, bs); - crypto_xor(prev, consts + offset, bs); - + crypto_xor(prev, &tctx->consts[offset], bs); crypto_cipher_encrypt_one(tfm, out, prev); - return 0; } @@ -191,7 +129,6 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; - unsigned long alignmask; u32 mask; int err; @@ -218,27 +155,22 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alignmask = alg->cra_alignmask | 3; - inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) + + alg->cra_blocksize * 2; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), - crypto_tfm_ctx_alignment()) + - (alignmask & - ~(crypto_tfm_ctx_alignment() - 1)) + - alg->cra_blocksize * 2; - - inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), - alignmask + 1) + - alg->cra_blocksize * 2; + inst->alg.descsize = alg->cra_blocksize; + inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; inst->alg.init = crypto_xcbc_digest_init; inst->alg.update = crypto_xcbc_digest_update; - inst->alg.final = crypto_xcbc_digest_final; + inst->alg.finup = crypto_xcbc_digest_finup; inst->alg.setkey = crypto_xcbc_digest_setkey; inst->free = shash_free_singlespawn_instance; @@ -267,10 +199,10 @@ static void __exit crypto_xcbc_module_exit(void) crypto_unregister_template(&crypto_xcbc_tmpl); } -subsys_initcall(crypto_xcbc_module_init); +module_init(crypto_xcbc_module_init); module_exit(crypto_xcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XCBC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("xcbc"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/xctr.c b/crypto/xctr.c new file mode 100644 index 000000000000..607ab82cb19b --- /dev/null +++ b/crypto/xctr.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * XCTR: XOR Counter mode - Adapted from ctr.c + * + * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> + * Copyright 2021 Google LLC + */ + +/* + * XCTR mode is a blockcipher mode of operation used to implement HCTR2. XCTR is + * closely related to the CTR mode of operation; the main difference is that CTR + * generates the keystream using E(CTR + IV) whereas XCTR generates the + * keystream using E(CTR ^ IV). This allows implementations to avoid dealing + * with multi-limb integers (as is required in CTR mode). XCTR is also specified + * using little-endian arithmetic which makes it slightly faster on LE machines. 
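To make the comparison in this comment concrete: both modes encrypt a counter-dependent block to obtain each keystream block, but XCTR folds the (one-based, little-endian) block index into the IV with XOR rather than addition. A single-block sketch — E() is a toy stand-in for the 16-byte block cipher, so this is illustrative only:

#include <stdint.h>
#include <string.h>

#define BLOCKSIZE 16	/* XCTR_BLOCKSIZE */

/* Toy 16-byte permutation standing in for the real block cipher. */
static void E(uint8_t out[BLOCKSIZE], const uint8_t in[BLOCKSIZE])
{
	for (int i = 0; i < BLOCKSIZE; i++)
		out[i] = (uint8_t)((in[i] ^ 0x5A) + i);
}

/* Keystream block i (1-based): E(IV ^ le32(i)). CTR would compute
 * E(IV + i) instead, which needs multi-limb carry propagation. */
static void xctr_keystream_block(uint8_t ks[BLOCKSIZE],
				 const uint8_t iv[BLOCKSIZE], uint32_t i)
{
	uint8_t blk[BLOCKSIZE];

	memcpy(blk, iv, BLOCKSIZE);
	blk[0] ^= (uint8_t)i;		/* XOR the LE counter into the IV */
	blk[1] ^= (uint8_t)(i >> 8);
	blk[2] ^= (uint8_t)(i >> 16);
	blk[3] ^= (uint8_t)(i >> 24);
	E(ks, blk);
}

This mirrors what crypto_xctr_crypt_segment() below does with crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)), applied once before encryption and once after to restore the IV.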
+ * + * See the HCTR2 paper for more details: + * Length-preserving encryption with HCTR2 + * (https://eprint.iacr.org/2021/1441.pdf) + */ + +#include <crypto/algapi.h> +#include <crypto/internal/cipher.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +/* For now this implementation is limited to 16-byte blocks for simplicity */ +#define XCTR_BLOCKSIZE 16 + +static void crypto_xctr_crypt_final(struct skcipher_walk *walk, + struct crypto_cipher *tfm, u32 byte_ctr) +{ + u8 keystream[XCTR_BLOCKSIZE]; + const u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); + + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + crypto_cipher_encrypt_one(tfm, keystream, walk->iv); + crypto_xor_cpy(dst, keystream, src, nbytes); + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); +} + +static int crypto_xctr_crypt_segment(struct skcipher_walk *walk, + struct crypto_cipher *tfm, u32 byte_ctr) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + const u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); + + do { + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + fn(crypto_cipher_tfm(tfm), dst, walk->iv); + crypto_xor(dst, src, XCTR_BLOCKSIZE); + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + + le32_add_cpu(&ctr32, 1); + + src += XCTR_BLOCKSIZE; + dst += XCTR_BLOCKSIZE; + } while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE); + + return nbytes; +} + +static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk, + struct crypto_cipher *tfm, u32 byte_ctr) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + unsigned long alignmask = crypto_cipher_alignmask(tfm); + unsigned int nbytes = walk->nbytes; + u8 *data = walk->dst.virt.addr; + u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; + u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); + __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); + + do { + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + fn(crypto_cipher_tfm(tfm), keystream, walk->iv); + crypto_xor(data, keystream, XCTR_BLOCKSIZE); + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + + le32_add_cpu(&ctr32, 1); + + data += XCTR_BLOCKSIZE; + } while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE); + + return nbytes; +} + +static int crypto_xctr_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + u32 byte_ctr = 0; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes >= XCTR_BLOCKSIZE) { + if (walk.src.virt.addr == walk.dst.virt.addr) + nbytes = crypto_xctr_crypt_inplace(&walk, cipher, + byte_ctr); + else + nbytes = crypto_xctr_crypt_segment(&walk, cipher, + byte_ctr); + + byte_ctr += walk.nbytes - nbytes; + err = skcipher_walk_done(&walk, nbytes); + } + + if (walk.nbytes) { + crypto_xctr_crypt_final(&walk, cipher, byte_ctr); + err = skcipher_walk_done(&walk, 0); + } + + return err; +} + +static int crypto_xctr_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct skcipher_instance *inst; + struct crypto_alg *alg; + int err; + + inst = 
skcipher_alloc_instance_simple(tmpl, tb); + if (IS_ERR(inst)) + return PTR_ERR(inst); + + alg = skcipher_ialg_simple(inst); + + /* Block size must be 16 bytes. */ + err = -EINVAL; + if (alg->cra_blocksize != XCTR_BLOCKSIZE) + goto out_free_inst; + + /* XCTR mode is a stream cipher. */ + inst->alg.base.cra_blocksize = 1; + + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. + */ + inst->alg.chunksize = alg->cra_blocksize; + + inst->alg.encrypt = crypto_xctr_crypt; + inst->alg.decrypt = crypto_xctr_crypt; + + err = skcipher_register_instance(tmpl, inst); + if (err) { +out_free_inst: + inst->free(inst); + } + + return err; +} + +static struct crypto_template crypto_xctr_tmpl = { + .name = "xctr", + .create = crypto_xctr_create, + .module = THIS_MODULE, +}; + +static int __init crypto_xctr_module_init(void) +{ + return crypto_register_template(&crypto_xctr_tmpl); +} + +static void __exit crypto_xctr_module_exit(void) +{ + crypto_unregister_template(&crypto_xctr_tmpl); +} + +module_init(crypto_xctr_module_init); +module_exit(crypto_xctr_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("XCTR block cipher mode of operation"); +MODULE_ALIAS_CRYPTO("xctr"); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/xor.c b/crypto/xor.c index 8e72e5d5db0d..f39621a57bb3 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -83,33 +83,30 @@ static void __init do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) { int speed; - int i, j; - ktime_t min, start, diff; + unsigned long reps; + ktime_t min, start, t0; tmpl->next = template_list; template_list = tmpl; preempt_disable(); - min = (ktime_t)S64_MAX; - for (i = 0; i < 3; i++) { - start = ktime_get(); - for (j = 0; j < REPS; j++) { - mb(); /* prevent loop optimization */ - tmpl->do_2(BENCH_SIZE, b1, b2); - mb(); - } - diff = ktime_sub(ktime_get(), start); - if (diff < min) - min = diff; - } + reps = 0; + t0 = ktime_get(); + /* delay start until time has advanced */ + while ((start = ktime_get()) == t0) + cpu_relax(); + do { + mb(); /* prevent loop optimization */ + tmpl->do_2(BENCH_SIZE, b1, b2); + mb(); + } while (reps++ < REPS || (t0 = ktime_get()) == start); + min = ktime_sub(t0, start); preempt_enable(); // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s] - if (!min) - min = 1; - speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); + speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); tmpl->speed = speed; pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); @@ -165,6 +162,7 @@ out: static __exit void xor_exit(void) { } +MODULE_DESCRIPTION("RAID-5 checksumming functions"); MODULE_LICENSE("GPL"); #ifndef MODULE diff --git a/crypto/xts.c b/crypto/xts.c index 6c12f30dbdd6..3da8f5e053d6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -28,7 +28,7 @@ struct xts_tfm_ctx { struct xts_instance_ctx { struct crypto_skcipher_spawn spawn; - char name[CRYPTO_MAX_ALG_NAME]; + struct crypto_cipher_spawn tweak_spawn; }; struct xts_request_ctx { @@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, while (w.nbytes) { unsigned int avail = w.nbytes; - le128 *wsrc; + const le128 *wsrc; le128 *wdst; wsrc = w.src.virt.addr; @@ -140,9 +140,9 @@ static int xts_xor_tweak_post(struct skcipher_request *req, bool enc) return xts_xor_tweak(req, true, enc); } -static void xts_cts_done(struct crypto_async_request *areq, int err) +static void xts_cts_done(void *data, int err) { - struct skcipher_request 
diff --git a/crypto/xts.c b/crypto/xts.c
index 6c12f30dbdd6..3da8f5e053d6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -28,7 +28,7 @@ struct xts_tfm_ctx {
 
 struct xts_instance_ctx {
 	struct crypto_skcipher_spawn spawn;
-	char name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_cipher_spawn tweak_spawn;
 };
 
 struct xts_request_ctx {
@@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
-		le128 *wsrc;
+		const le128 *wsrc;
 		le128 *wdst;
 
 		wsrc = w.src.virt.addr;
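Inside this walk loop each 16-byte block is XORed with the current tweak, and the tweak is advanced by a doubling in GF(2^128) (gf128mul_x_ble() in the full source, which the excerpted context above does not show). A standalone sketch of that doubling, assuming the little-endian block convention with two 64-bit words:

    #include <stdint.h>

    /* Multiply a 128-bit XTS tweak by x (alpha) in GF(2^128), using the
     * little-endian block convention: t[0] holds bits 0..63, t[1] bits
     * 64..127.  A carry out of bit 127 is reduced with the polynomial
     * x^128 + x^7 + x^2 + x + 1, i.e. XOR with 0x87 on the low byte. */
    static void gf128_mul_x_ble(uint64_t t[2])
    {
        uint64_t carry = t[1] >> 63; /* bit 127 */

        t[1] = (t[1] << 1) | (t[0] >> 63);
        t[0] = (t[0] << 1) ^ (carry * 0x87);
    }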
@@ -140,9 +140,9 @@ static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
 	return xts_xor_tweak(req, true, enc);
 }
 
-static void xts_cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(void *data, int err)
 {
-	struct skcipher_request *req = areq->data;
+	struct skcipher_request *req = data;
 	le128 b;
 
 	if (!err) {
@@ -196,19 +196,19 @@ static int xts_cts_final(struct skcipher_request *req,
 	return 0;
 }
 
-static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(void *data, int err)
 {
-	struct skcipher_request *req = areq->data;
+	struct skcipher_request *req = data;
 
 	if (!err) {
 		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
-		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 		err = xts_xor_tweak_post(req, true);
 
 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
 			err = xts_cts_final(req, crypto_skcipher_encrypt);
-			if (err == -EINPROGRESS)
+			if (err == -EINPROGRESS || err == -EBUSY)
 				return;
 		}
 	}
@@ -216,19 +216,19 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(void *data, int err)
 {
-	struct skcipher_request *req = areq->data;
+	struct skcipher_request *req = data;
 
 	if (!err) {
 		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
-		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 		err = xts_xor_tweak_post(req, false);
 
 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
 			err = xts_cts_final(req, crypto_skcipher_decrypt);
-			if (err == -EINPROGRESS)
+			if (err == -EINPROGRESS || err == -EBUSY)
 				return;
 		}
 	}
@@ -306,7 +306,7 @@ static int xts_init_tfm(struct crypto_skcipher *tfm)
 
 	ctx->child = child;
 
-	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
+	tweak = crypto_spawn_cipher(&ictx->tweak_spawn);
 	if (IS_ERR(tweak)) {
 		crypto_free_skcipher(ctx->child);
 		return PTR_ERR(tweak);
@@ -333,14 +333,16 @@ static void xts_free_instance(struct skcipher_instance *inst)
 	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
 
 	crypto_drop_skcipher(&ictx->spawn);
+	crypto_drop_cipher(&ictx->tweak_spawn);
 	kfree(inst);
 }
 
 static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
+	struct skcipher_alg_common *alg;
+	char name[CRYPTO_MAX_ALG_NAME];
 	struct skcipher_instance *inst;
 	struct xts_instance_ctx *ctx;
-	struct skcipher_alg *alg;
 	const char *cipher_name;
 	u32 mask;
 	int err;
@@ -361,27 +363,27 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 	err = crypto_grab_skcipher(&ctx->spawn,
 				   skcipher_crypto_instance(inst),
 				   cipher_name, 0, mask);
-	if (err == -ENOENT) {
+	if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
 		err = -ENAMETOOLONG;
-		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+		if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
 			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 			goto err_free_inst;
 
 		err = crypto_grab_skcipher(&ctx->spawn,
 					   skcipher_crypto_instance(inst),
-					   ctx->name, 0, mask);
+					   name, 0, mask);
 	}
 
 	if (err)
 		goto err_free_inst;
 
-	alg = crypto_skcipher_spawn_alg(&ctx->spawn);
+	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
 
 	err = -EINVAL;
 	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
 		goto err_free_inst;
 
-	if (crypto_skcipher_alg_ivsize(alg))
+	if (alg->ivsize)
 		goto err_free_inst;
 
 	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
@@ -395,34 +397,39 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 	/* Alas we screwed up the naming so we have to mangle the
 	 * cipher name.
 	 */
-	if (!strncmp(cipher_name, "ecb(", 4)) {
-		unsigned len;
+	if (!memcmp(cipher_name, "ecb(", 4)) {
+		int len;
 
-		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
-		if (len < 2 || len >= sizeof(ctx->name))
+		len = strscpy(name, cipher_name + 4, sizeof(name));
+		if (len < 2)
 			goto err_free_inst;
 
-		if (ctx->name[len - 1] != ')')
+		if (name[len - 1] != ')')
 			goto err_free_inst;
 
-		ctx->name[len - 1] = 0;
+		name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+			     "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) {
 			err = -ENAMETOOLONG;
 			goto err_free_inst;
 		}
 	} else
 		goto err_free_inst;
 
+	err = crypto_grab_cipher(&ctx->tweak_spawn,
+				 skcipher_crypto_instance(inst), name, 0, mask);
+	if (err)
+		goto err_free_inst;
+
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);
 
 	inst->alg.ivsize = XTS_BLOCK_SIZE;
-	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
-	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
+	inst->alg.min_keysize = alg->min_keysize * 2;
+	inst->alg.max_keysize = alg->max_keysize * 2;
 
 	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
@@ -459,10 +466,11 @@ static void __exit xts_module_exit(void)
 	crypto_unregister_template(&xts_tmpl);
 }
 
-subsys_initcall(xts_module_init);
+module_init(xts_module_init);
 module_exit(xts_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
 MODULE_ALIAS_CRYPTO("xts");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
+MODULE_SOFTDEP("pre: ecb");
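The doubled min_keysize/max_keysize reflect that an XTS key is two concatenated cipher keys: one for the data cipher, one for the tweak cipher now grabbed as a spawn. The setkey path is not part of this hunk; the following is a trimmed sketch of how that split typically looks, modelled on the unchanged xts_setkey() in this file, so treat the exact flag handling it omits as elided.

    static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
                          unsigned int keylen)
    {
        struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
        int err;

        /* rejects odd lengths, and identical halves in FIPS mode */
        err = xts_verify_key(parent, key, keylen);
        if (err)
            return err;

        keylen /= 2;

        /* first half keys the data cipher ... */
        err = crypto_skcipher_setkey(ctx->child, key, keylen);
        if (err)
            return err;

        /* ... second half keys the tweak encryption */
        return crypto_cipher_setkey(ctx->tweak, key + keylen, keylen);
    }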
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c
index 55d1c8a76127..175bb7ae0fcd 100644
--- a/crypto/xxhash_generic.c
+++ b/crypto/xxhash_generic.c
@@ -4,7 +4,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/xxhash.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 #define XXHASH64_BLOCK_SIZE	32
 #define XXHASH64_DIGEST_SIZE	8
@@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-subsys_initcall(xxhash_mod_init);
+module_init(xxhash_mod_init);
 module_exit(xxhash_mod_fini);
 
 MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 1a3309f066f7..cbbd0413751a 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -10,247 +10,304 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/net.h>
+#include <linux/overflow.h>
 #include <linux/vmalloc.h>
 #include <linux/zstd.h>
-#include <crypto/internal/scompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
 
-#define ZSTD_DEF_LEVEL	3
+#define ZSTD_DEF_LEVEL		3
+#define ZSTD_MAX_WINDOWLOG	18
+#define ZSTD_MAX_SIZE		BIT(ZSTD_MAX_WINDOWLOG)
 
 struct zstd_ctx {
-	ZSTD_CCtx *cctx;
-	ZSTD_DCtx *dctx;
-	void *cwksp;
-	void *dwksp;
+	zstd_cctx *cctx;
+	zstd_dctx *dctx;
+	size_t wksp_size;
+	zstd_parameters params;
+	u8 wksp[] __aligned(8) __counted_by(wksp_size);
 };
 
-static ZSTD_parameters zstd_params(void)
-{
-	return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
-}
+static DEFINE_MUTEX(zstd_stream_lock);
 
-static int zstd_comp_init(struct zstd_ctx *ctx)
+static void *zstd_alloc_stream(void)
 {
-	int ret = 0;
-	const ZSTD_parameters params = zstd_params();
-	const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
+	zstd_parameters params;
+	struct zstd_ctx *ctx;
+	size_t wksp_size;
 
-	ctx->cwksp = vzalloc(wksp_size);
-	if (!ctx->cwksp) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	params = zstd_get_params(ZSTD_DEF_LEVEL, ZSTD_MAX_SIZE);
 
-	ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
-	if (!ctx->cctx) {
-		ret = -EINVAL;
-		goto out_free;
-	}
-out:
-	return ret;
-out_free:
-	vfree(ctx->cwksp);
-	goto out;
-}
+	wksp_size = max(zstd_cstream_workspace_bound(&params.cParams),
+			zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
+	if (!wksp_size)
+		return ERR_PTR(-EINVAL);
 
-static int zstd_decomp_init(struct zstd_ctx *ctx)
-{
-	int ret = 0;
-	const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
+	ctx = kvmalloc(struct_size(ctx, wksp, wksp_size), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
 
-	ctx->dwksp = vzalloc(wksp_size);
-	if (!ctx->dwksp) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	ctx->params = params;
+	ctx->wksp_size = wksp_size;
 
-	ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
-	if (!ctx->dctx) {
-		ret = -EINVAL;
-		goto out_free;
-	}
-out:
-	return ret;
-out_free:
-	vfree(ctx->dwksp);
-	goto out;
+	return ctx;
 }
 
-static void zstd_comp_exit(struct zstd_ctx *ctx)
+static void zstd_free_stream(void *ctx)
 {
-	vfree(ctx->cwksp);
-	ctx->cwksp = NULL;
-	ctx->cctx = NULL;
+	kvfree(ctx);
 }
 
-static void zstd_decomp_exit(struct zstd_ctx *ctx)
-{
-	vfree(ctx->dwksp);
-	ctx->dwksp = NULL;
-	ctx->dctx = NULL;
-}
+static struct crypto_acomp_streams zstd_streams = {
+	.alloc_ctx = zstd_alloc_stream,
+	.free_ctx = zstd_free_stream,
+};
 
-static int __zstd_init(void *ctx)
+static int zstd_init(struct crypto_acomp *acomp_tfm)
 {
-	int ret;
+	int ret = 0;
+
+	mutex_lock(&zstd_stream_lock);
+	ret = crypto_acomp_alloc_streams(&zstd_streams);
+	mutex_unlock(&zstd_stream_lock);
 
-	ret = zstd_comp_init(ctx);
-	if (ret)
-		return ret;
-	ret = zstd_decomp_init(ctx);
-	if (ret)
-		zstd_comp_exit(ctx);
 	return ret;
 }
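Where the old code kept two vzalloc'd workspaces, the new context packs both directions into one trailing flexible-array buffer sized for the larger of the two workspace bounds, allocated with a single overflow-checked kvmalloc(struct_size(...)). A userspace analogue of that layout, with struct_size() approximated by offsetof() and the bound values as placeholder parameters:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct stream_ctx {
        size_t wksp_size;
        uint8_t wksp[]; /* one workspace, shared by both directions */
    };

    static struct stream_ctx *alloc_stream(size_t cbound, size_t dbound)
    {
        /* size once for whichever direction needs more, like the
         * max(cstream bound, dstream bound) computation above */
        size_t wksp_size = cbound > dbound ? cbound : dbound;
        struct stream_ctx *ctx;

        /* offsetof on the flexible member gives the header size;
         * kernel code uses struct_size() to also catch overflow */
        ctx = malloc(offsetof(struct stream_ctx, wksp) + wksp_size);
        if (!ctx)
            return NULL;
        ctx->wksp_size = wksp_size;
        return ctx;
    }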
-static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
+static int zstd_compress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+			     const void *src, void *dst, unsigned int *dlen)
 {
-	int ret;
-	struct zstd_ctx *ctx;
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
-
-	ret = __zstd_init(ctx);
-	if (ret) {
-		kfree(ctx);
-		return ERR_PTR(ret);
-	}
+	size_t out_len;
 
-	return ctx;
-}
+	ctx->cctx = zstd_init_cctx(ctx->wksp, ctx->wksp_size);
+	if (!ctx->cctx)
+		return -EINVAL;
 
-static int zstd_init(struct crypto_tfm *tfm)
-{
-	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+	out_len = zstd_compress_cctx(ctx->cctx, dst, req->dlen, src, req->slen,
+				     &ctx->params);
+	if (zstd_is_error(out_len))
+		return -EINVAL;
 
-	return __zstd_init(ctx);
-}
+	*dlen = out_len;
 
-static void __zstd_exit(void *ctx)
-{
-	zstd_comp_exit(ctx);
-	zstd_decomp_exit(ctx);
+	return 0;
 }
 
-static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static int zstd_compress(struct acomp_req *req)
 {
-	__zstd_exit(ctx);
-	kfree_sensitive(ctx);
-}
+	struct crypto_acomp_stream *s;
+	unsigned int pos, scur, dcur;
+	unsigned int total_out = 0;
+	bool data_available = true;
+	zstd_out_buffer outbuf;
+	struct acomp_walk walk;
+	zstd_in_buffer inbuf;
+	struct zstd_ctx *ctx;
+	size_t pending_bytes;
+	size_t num_bytes;
+	int ret;
 
-static void zstd_exit(struct crypto_tfm *tfm)
-{
-	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+	s = crypto_acomp_lock_stream_bh(&zstd_streams);
+	ctx = s->ctx;
 
-	__zstd_exit(ctx);
-}
+	ret = acomp_walk_virt(&walk, req, true);
+	if (ret)
+		goto out;
 
-static int __zstd_compress(const u8 *src, unsigned int slen,
-			   u8 *dst, unsigned int *dlen, void *ctx)
-{
-	size_t out_len;
-	struct zstd_ctx *zctx = ctx;
-	const ZSTD_parameters params = zstd_params();
+	ctx->cctx = zstd_init_cstream(&ctx->params, 0, ctx->wksp, ctx->wksp_size);
+	if (!ctx->cctx) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
-	if (ZSTD_isError(out_len))
-		return -EINVAL;
-	*dlen = out_len;
-	return 0;
-}
+	do {
+		dcur = acomp_walk_next_dst(&walk);
+		if (!dcur) {
+			ret = -ENOSPC;
+			goto out;
+		}
+
+		outbuf.pos = 0;
+		outbuf.dst = (u8 *)walk.dst.virt.addr;
+		outbuf.size = dcur;
+
+		do {
+			scur = acomp_walk_next_src(&walk);
+			if (dcur == req->dlen && scur == req->slen) {
+				ret = zstd_compress_one(req, ctx, walk.src.virt.addr,
+							walk.dst.virt.addr, &total_out);
+				acomp_walk_done_src(&walk, scur);
+				acomp_walk_done_dst(&walk, dcur);
+				goto out;
+			}
+
+			if (scur) {
+				inbuf.pos = 0;
+				inbuf.src = walk.src.virt.addr;
+				inbuf.size = scur;
+			} else {
+				data_available = false;
+				break;
+			}
+
+			num_bytes = zstd_compress_stream(ctx->cctx, &outbuf, &inbuf);
+			if (ZSTD_isError(num_bytes)) {
+				ret = -EIO;
+				goto out;
+			}
+
+			pending_bytes = zstd_flush_stream(ctx->cctx, &outbuf);
+			if (ZSTD_isError(pending_bytes)) {
+				ret = -EIO;
+				goto out;
+			}
+			acomp_walk_done_src(&walk, inbuf.pos);
+		} while (dcur != outbuf.pos);
+
+		total_out += outbuf.pos;
+		acomp_walk_done_dst(&walk, dcur);
+	} while (data_available);
+
+	pos = outbuf.pos;
+	num_bytes = zstd_end_stream(ctx->cctx, &outbuf);
+	if (ZSTD_isError(num_bytes))
+		ret = -EIO;
+	else
+		total_out += (outbuf.pos - pos);
 
-static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
-			 unsigned int slen, u8 *dst, unsigned int *dlen)
-{
-	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+out:
+	if (ret)
+		req->dlen = 0;
+	else
+		req->dlen = total_out;
 
-	return __zstd_compress(src, slen, dst, dlen, ctx);
-}
+	crypto_acomp_unlock_stream_bh(s);
 
-static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
-			  unsigned int slen, u8 *dst, unsigned int *dlen,
-			  void *ctx)
-{
-	return __zstd_compress(src, slen, dst, dlen, ctx);
+	return ret;
 }
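The compress path above walks the request segment by segment, feeding each source piece to the streaming encoder and flushing into whatever destination space the walk hands back; only a fully linear request takes the one-shot zstd_compress_one() shortcut. The same produce/flush/end shape can be seen against userspace libzstd (link with -lzstd); this sketch trims error handling and frees:

    #include <zstd.h>

    static size_t compress_all(const void *src, size_t slen,
                               void *dst, size_t dlen)
    {
        ZSTD_CStream *zcs = ZSTD_createCStream();
        ZSTD_inBuffer in = { src, slen, 0 };
        ZSTD_outBuffer out = { dst, dlen, 0 };
        size_t left;

        ZSTD_initCStream(zcs, 3); /* ZSTD_DEF_LEVEL */

        /* consume the input; the kernel loop does this one walk
         * segment at a time and maps errors to -EIO */
        while (in.pos < in.size)
            if (ZSTD_isError(ZSTD_compressStream(zcs, &out, &in)))
                break;

        /* drain what the encoder still buffers, like zstd_end_stream() */
        do {
            left = ZSTD_endStream(zcs, &out);
        } while (left != 0 && !ZSTD_isError(left) && out.pos < out.size);

        ZSTD_freeCStream(zcs);
        return out.pos; /* total_out */
    }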
-static int __zstd_decompress(const u8 *src, unsigned int slen,
-			     u8 *dst, unsigned int *dlen, void *ctx)
+static int zstd_decompress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+			       const void *src, void *dst, unsigned int *dlen)
 {
 	size_t out_len;
-	struct zstd_ctx *zctx = ctx;
 
-	out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
-	if (ZSTD_isError(out_len))
+	ctx->dctx = zstd_init_dctx(ctx->wksp, ctx->wksp_size);
+	if (!ctx->dctx)
 		return -EINVAL;
+
+	out_len = zstd_decompress_dctx(ctx->dctx, dst, req->dlen, src, req->slen);
+	if (zstd_is_error(out_len))
+		return -EINVAL;
+
 	*dlen = out_len;
+
 	return 0;
 }
 
-static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
-			   unsigned int slen, u8 *dst, unsigned int *dlen)
+static int zstd_decompress(struct acomp_req *req)
 {
-	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
+	struct crypto_acomp_stream *s;
+	unsigned int total_out = 0;
+	unsigned int scur, dcur;
+	zstd_out_buffer outbuf;
+	struct acomp_walk walk;
+	zstd_in_buffer inbuf;
+	struct zstd_ctx *ctx;
+	size_t pending_bytes;
+	int ret;
 
-static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen,
-			    void *ctx)
-{
-	return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
+	s = crypto_acomp_lock_stream_bh(&zstd_streams);
+	ctx = s->ctx;
 
-static struct crypto_alg alg = {
-	.cra_name		= "zstd",
-	.cra_driver_name	= "zstd-generic",
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct zstd_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= zstd_init,
-	.cra_exit		= zstd_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= zstd_compress,
-	.coa_decompress		= zstd_decompress } }
-};
+	ret = acomp_walk_virt(&walk, req, true);
+	if (ret)
+		goto out;
 
-static struct scomp_alg scomp = {
-	.alloc_ctx		= zstd_alloc_ctx,
-	.free_ctx		= zstd_free_ctx,
-	.compress		= zstd_scompress,
-	.decompress		= zstd_sdecompress,
-	.base			= {
-	.cra_name		= "zstd",
-	.cra_driver_name	= "zstd-scomp",
-	.cra_module		= THIS_MODULE,
+	ctx->dctx = zstd_init_dstream(ZSTD_MAX_SIZE, ctx->wksp, ctx->wksp_size);
+	if (!ctx->dctx) {
+		ret = -EINVAL;
+		goto out;
 	}
-};
 
-static int __init zstd_mod_init(void)
-{
-	int ret;
+	do {
+		scur = acomp_walk_next_src(&walk);
+		if (scur) {
+			inbuf.pos = 0;
+			inbuf.size = scur;
+			inbuf.src = walk.src.virt.addr;
+		} else {
+			break;
+		}
+
+		do {
+			dcur = acomp_walk_next_dst(&walk);
+			if (dcur == req->dlen && scur == req->slen) {
+				ret = zstd_decompress_one(req, ctx, walk.src.virt.addr,
+							  walk.dst.virt.addr, &total_out);
+				acomp_walk_done_dst(&walk, dcur);
+				acomp_walk_done_src(&walk, scur);
+				goto out;
+			}
+
+			if (!dcur) {
+				ret = -ENOSPC;
+				goto out;
+			}
+
+			outbuf.pos = 0;
+			outbuf.dst = (u8 *)walk.dst.virt.addr;
+			outbuf.size = dcur;
+
+			pending_bytes = zstd_decompress_stream(ctx->dctx, &outbuf, &inbuf);
+			if (ZSTD_isError(pending_bytes)) {
+				ret = -EIO;
+				goto out;
+			}
+
+			total_out += outbuf.pos;
+
+			acomp_walk_done_dst(&walk, outbuf.pos);
+		} while (inbuf.pos != scur);
+
+		acomp_walk_done_src(&walk, scur);
+	} while (ret == 0);
 
-	ret = crypto_register_alg(&alg);
+out:
 	if (ret)
-		return ret;
+		req->dlen = 0;
+	else
+		req->dlen = total_out;
 
-	ret = crypto_register_scomp(&scomp);
-	if (ret)
-		crypto_unregister_alg(&alg);
+	crypto_acomp_unlock_stream_bh(s);
 
 	return ret;
 }
 
+static struct acomp_alg zstd_acomp = {
+	.base = {
+		.cra_name = "zstd",
+		.cra_driver_name = "zstd-generic",
+		.cra_flags = CRYPTO_ALG_REQ_VIRT,
+		.cra_module = THIS_MODULE,
+	},
+	.init = zstd_init,
+	.compress = zstd_compress,
+	.decompress = zstd_decompress,
+};
+
+static int __init zstd_mod_init(void)
+{
+	return crypto_register_acomp(&zstd_acomp);
+}
+
 static void __exit zstd_mod_fini(void)
 {
-	crypto_unregister_alg(&alg);
-	crypto_unregister_scomp(&scomp);
+	crypto_unregister_acomp(&zstd_acomp);
+	crypto_acomp_free_streams(&zstd_streams);
 }
 
-subsys_initcall(zstd_mod_init);
+module_init(zstd_mod_init);
 module_exit(zstd_mod_fini);
 
 MODULE_LICENSE("GPL");
