Diffstat (limited to 'crypto')
170 files changed, 12006 insertions(+), 9402 deletions(-)
diff --git a/crypto/842.c b/crypto/842.c
index e59e54d76960..8c257c40e2b9 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -18,17 +18,12 @@
  * drivers/crypto/nx/nx-842-crypto.c
  */
 
+#include <crypto/internal/scompress.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <linux/sw842.h>
-#include <crypto/internal/scompress.h>
-
-struct crypto842_ctx {
-	void *wmem;	/* working memory for compress */
-};
 
-static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+static void *crypto842_alloc_ctx(void)
 {
 	void *ctx;
 
@@ -39,38 +34,11 @@ static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
 	return ctx;
 }
 
-static int crypto842_init(struct crypto_tfm *tfm)
-{
-	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	ctx->wmem = crypto842_alloc_ctx(NULL);
-	if (IS_ERR(ctx->wmem))
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void crypto842_free_ctx(void *ctx)
 {
 	kfree(ctx);
 }
 
-static void crypto842_exit(struct crypto_tfm *tfm)
-{
-	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto842_free_ctx(NULL, ctx->wmem);
-}
-
-static int crypto842_compress(struct crypto_tfm *tfm,
-			      const u8 *src, unsigned int slen,
-			      u8 *dst, unsigned int *dlen)
-{
-	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	return sw842_compress(src, slen, dst, dlen, ctx->wmem);
-}
-
 static int crypto842_scompress(struct crypto_scomp *tfm,
 			       const u8 *src, unsigned int slen,
 			       u8 *dst, unsigned int *dlen, void *ctx)
@@ -78,13 +46,6 @@ static int crypto842_scompress(struct crypto_scomp *tfm,
 	return sw842_compress(src, slen, dst, dlen, ctx);
 }
 
-static int crypto842_decompress(struct crypto_tfm *tfm,
-				const u8 *src, unsigned int slen,
-				u8 *dst, unsigned int *dlen)
-{
-	return sw842_decompress(src, slen, dst, dlen);
-}
-
 static int crypto842_sdecompress(struct crypto_scomp *tfm,
 				 const u8 *src, unsigned int slen,
 				 u8 *dst, unsigned int *dlen, void *ctx)
@@ -92,20 +53,6 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm,
 	return sw842_decompress(src, slen, dst, dlen);
 }
 
-static struct crypto_alg alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-generic",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct crypto842_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= crypto842_init,
-	.cra_exit		= crypto842_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= crypto842_compress,
-	.coa_decompress		= crypto842_decompress } }
-};
-
 static struct scomp_alg scomp = {
 	.alloc_ctx		= crypto842_alloc_ctx,
 	.free_ctx		= crypto842_free_ctx,
@@ -121,25 +68,12 @@ static struct scomp_alg scomp = {
 
 static int __init crypto842_mod_init(void)
 {
-	int ret;
-
-	ret = crypto_register_alg(&alg);
-	if (ret)
-		return ret;
-
-	ret = crypto_register_scomp(&scomp);
-	if (ret) {
-		crypto_unregister_alg(&alg);
-		return ret;
-	}
-
-	return ret;
+	return crypto_register_scomp(&scomp);
}
-subsys_initcall(crypto842_mod_init);
+module_init(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
-	crypto_unregister_alg(&alg);
 	crypto_unregister_scomp(&scomp);
 }
 module_exit(crypto842_mod_exit);
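[Editor's note: with the legacy crypto_comp entry points gone, "842" is reachable only through the acomp interface. The following is a minimal, hedged sketch of a kernel user compressing one linear buffer that way; the helper name and error handling are illustrative assumptions, not part of this diff.]

/*
 * Hedged sketch: driving the "842" algorithm through the acomp API
 * that remains after this change. example_842_compress() is a
 * hypothetical caller, not code from this series.
 */
#include <crypto/acompress.h>
#include <linux/scatterlist.h>

static int example_842_compress(const void *src, unsigned int slen,
				void *dst, unsigned int *dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, *dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Synchronously wait for the (possibly async) implementation. */
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		*dlen = req->dlen;

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}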
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 2903ce19f15c..e1cfd0d4cc8f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
 
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
-	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS
 	depends on (MODULE_SIG || !MODULES)
 	help
 	  This option enables the fips boot option which is
@@ -141,12 +141,19 @@ config CRYPTO_ACOMP
 	select CRYPTO_ALGAPI
 	select CRYPTO_ACOMP2
 
+config CRYPTO_HKDF
+	tristate
+	select CRYPTO_SHA256 if CRYPTO_SELFTESTS
+	select CRYPTO_SHA512 if CRYPTO_SELFTESTS
+	select CRYPTO_HASH2
+
 config CRYPTO_MANAGER
-	tristate "Cryptographic algorithm manager"
+	tristate
+	default CRYPTO_ALGAPI if CRYPTO_SELFTESTS
 	select CRYPTO_MANAGER2
 	help
-	  Create default cryptographic template instantiations such as
-	  cbc(aes).
+	  This provides the support for instantiating templates such as
+	  cbc(aes), and the support for the crypto self-tests.
 
 config CRYPTO_MANAGER2
 	def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
@@ -167,35 +174,44 @@ config CRYPTO_USER
 	  Userspace configuration for cryptographic instantiations such as
 	  cbc(aes).
 
-config CRYPTO_MANAGER_DISABLE_TESTS
-	bool "Disable run-time self tests"
-	default y
+config CRYPTO_SELFTESTS
+	bool "Enable cryptographic self-tests"
+	depends on EXPERT
 	help
-	  Disable run-time self tests that normally take place at
-	  algorithm registration.
+	  Enable the cryptographic self-tests.
+
+	  The cryptographic self-tests run at boot time, or at algorithm
+	  registration time if algorithms are dynamically loaded later.
+
+	  There are two main use cases for these tests:
+
+	  - Development and pre-release testing.  In this case, also enable
+	    CRYPTO_SELFTESTS_FULL to get the full set of tests.  All crypto code
+	    in the kernel is expected to pass the full set of tests.
 
-config CRYPTO_MANAGER_EXTRA_TESTS
-	bool "Enable extra run-time crypto self tests"
-	depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
+	  - Production kernels, to help prevent buggy drivers from being used
+	    and/or meet FIPS 140-3 pre-operational testing requirements.  In
+	    this case, enable CRYPTO_SELFTESTS but not CRYPTO_SELFTESTS_FULL.
+
+config CRYPTO_SELFTESTS_FULL
+	bool "Enable the full set of cryptographic self-tests"
+	depends on CRYPTO_SELFTESTS
 	help
-	  Enable extra run-time self tests of registered crypto algorithms,
-	  including randomized fuzz tests.
+	  Enable the full set of cryptographic self-tests for each algorithm.
+
+	  The full set of tests should be enabled for development and
+	  pre-release testing, but not in production kernels.
 
-	  This is intended for developer use only, as these tests take much
-	  longer to run than the normal self tests.
+	  All crypto code in the kernel is expected to pass the full tests.
 
 config CRYPTO_NULL
 	tristate "Null algorithms"
-	select CRYPTO_NULL2
+	select CRYPTO_ALGAPI
+	select CRYPTO_SKCIPHER
+	select CRYPTO_HASH
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
 
-config CRYPTO_NULL2
-	tristate
-	select CRYPTO_ALGAPI2
-	select CRYPTO_SKCIPHER2
-	select CRYPTO_HASH2
-
 config CRYPTO_PCRYPT
 	tristate "Parallel crypto engine"
 	depends on SMP
@@ -222,18 +238,32 @@ config CRYPTO_AUTHENC
 	select CRYPTO_SKCIPHER
 	select CRYPTO_MANAGER
 	select CRYPTO_HASH
-	select CRYPTO_NULL
 	help
 	  Authenc: Combined mode wrapper for IPsec.
 
 	  This is required for IPSec ESP (XFRM_ESP).
 
-config CRYPTO_TEST
-	tristate "Testing module"
+config CRYPTO_KRB5ENC
+	tristate "Kerberos 5 combined hash+cipher support"
+	select CRYPTO_AEAD
+	select CRYPTO_SKCIPHER
+	select CRYPTO_MANAGER
+	select CRYPTO_HASH
+	help
+	  Combined hash and cipher support for Kerberos 5 RFC3961 simplified
+	  profile.  This is required for Kerberos 5-style encryption, used by
+	  sunrpc/NFS and rxrpc/AFS.
+
+config CRYPTO_BENCHMARK
+	tristate "Crypto benchmarking module"
 	depends on m || EXPERT
 	select CRYPTO_MANAGER
 	help
-	  Quick & dirty crypto test module.
+	  Quick & dirty crypto benchmarking module.
+
+	  This is mainly intended for use by people developing cryptographic
+	  algorithms in the kernel.  It should not be enabled in production
+	  kernels.
 
 config CRYPTO_SIMD
 	tristate
@@ -250,6 +280,7 @@ config CRYPTO_RSA
 	tristate "RSA (Rivest-Shamir-Adleman)"
 	select CRYPTO_AKCIPHER
 	select CRYPTO_MANAGER
+	select CRYPTO_SIG
 	select MPILIB
 	select ASN1
 	help
@@ -290,19 +321,19 @@ config CRYPTO_ECDH
 config CRYPTO_ECDSA
 	tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)"
 	select CRYPTO_ECC
-	select CRYPTO_AKCIPHER
+	select CRYPTO_SIG
 	select ASN1
 	help
 	  ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186,
 	  ISO/IEC 14888-3)
-	  using curves P-192, P-256, and P-384
+	  using curves P-192, P-256, P-384 and P-521
 
 	  Only signature verification is implemented.
 
 config CRYPTO_ECRDSA
 	tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)"
 	select CRYPTO_ECC
-	select CRYPTO_AKCIPHER
+	select CRYPTO_SIG
 	select CRYPTO_STREEBOG
 	select OID_REGISTRY
 	select ASN1
@@ -313,28 +344,11 @@ config CRYPTO_ECRDSA
 	  One of the Russian cryptographic standard algorithms (called GOST
 	  algorithms).  Only signature verification is implemented.
 
-config CRYPTO_SM2
-	tristate "SM2 (ShangMi 2)"
-	select CRYPTO_SM3
-	select CRYPTO_AKCIPHER
-	select CRYPTO_MANAGER
-	select MPILIB
-	select ASN1
-	help
-	  SM2 (ShangMi 2) public key algorithm
-
-	  Published by State Encryption Management Bureau, China,
-	  as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
-
-	  References:
-	  https://datatracker.ietf.org/doc/draft-shen-sm2-ecdsa/
-	  http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml
-	  http://www.gmbz.org.cn/main/bzlb.html
-
 config CRYPTO_CURVE25519
 	tristate "Curve25519"
 	select CRYPTO_KPP
 	select CRYPTO_LIB_CURVE25519_GENERIC
+	select CRYPTO_LIB_CURVE25519_INTERNAL
 	help
 	  Curve25519 elliptic curve (RFC7748)
 
@@ -632,6 +646,7 @@ config CRYPTO_ARC4
 
 config CRYPTO_CHACHA20
 	tristate "ChaCha"
+	select CRYPTO_LIB_CHACHA
 	select CRYPTO_LIB_CHACHA_GENERIC
 	select CRYPTO_SKCIPHER
 	help
@@ -701,14 +716,6 @@ config CRYPTO_HCTR2
 
 	  See https://eprint.iacr.org/2021/1441
 
-config CRYPTO_KEYWRAP
-	tristate "KW (AES Key Wrap)"
-	select CRYPTO_SKCIPHER
-	select CRYPTO_MANAGER
-	help
-	  KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F
-	  and RFC3394) without padding.
-
 config CRYPTO_LRW
 	tristate "LRW (Liskov Rivest Wagner)"
 	select CRYPTO_LIB_GF128MUL
@@ -789,8 +796,8 @@ config CRYPTO_AEGIS128_SIMD
 config CRYPTO_CHACHA20POLY1305
 	tristate "ChaCha20-Poly1305"
 	select CRYPTO_CHACHA20
-	select CRYPTO_POLY1305
 	select CRYPTO_AEAD
+	select CRYPTO_LIB_POLY1305
 	select CRYPTO_MANAGER
 	help
 	  ChaCha20 stream cipher and Poly1305 authenticator combined
@@ -811,7 +818,6 @@ config CRYPTO_GCM
 	select CRYPTO_CTR
 	select CRYPTO_AEAD
 	select CRYPTO_GHASH
-	select CRYPTO_NULL
 	select CRYPTO_MANAGER
 	help
 	  GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
@@ -822,7 +828,6 @@ config CRYPTO_GENIV
 	tristate
 	select CRYPTO_AEAD
-	select CRYPTO_NULL
 	select CRYPTO_MANAGER
 	select CRYPTO_RNG_DEFAULT
 
@@ -958,17 +963,6 @@ config CRYPTO_POLYVAL
 	  This is used in HCTR2.  It is not a general-purpose cryptographic
 	  hash function.
 
-config CRYPTO_POLY1305
-	tristate "Poly1305"
-	select CRYPTO_HASH
-	select CRYPTO_LIB_POLY1305_GENERIC
-	help
-	  Poly1305 authenticator algorithm (RFC7539)
-
-	  Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
-	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
-	  in IETF protocols.  This is the portable C implementation of Poly1305.
-
 config CRYPTO_RMD160
 	tristate "RIPEMD-160"
 	select CRYPTO_HASH
@@ -998,6 +992,7 @@ config CRYPTO_SHA256
 	tristate "SHA-224 and SHA-256"
 	select CRYPTO_HASH
 	select CRYPTO_LIB_SHA256
+	select CRYPTO_LIB_SHA256_GENERIC
 	help
 	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC
 	  10118-3)
@@ -1016,13 +1011,10 @@ config CRYPTO_SHA3
 	help
 	  SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
 
-config CRYPTO_SM3
-	tristate
-
 config CRYPTO_SM3_GENERIC
 	tristate "SM3 (ShangMi 3)"
 	select CRYPTO_HASH
-	select CRYPTO_SM3
+	select CRYPTO_LIB_SM3
 	help
 	  SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC
 	  10118-3)
@@ -1046,16 +1038,6 @@ config CRYPTO_STREEBOG
 	  https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
 	  https://tools.ietf.org/html/rfc6986
 
-config CRYPTO_VMAC
-	tristate "VMAC"
-	select CRYPTO_HASH
-	select CRYPTO_MANAGER
-	help
-	  VMAC is a message authentication algorithm designed for
-	  very high speed on 64-bit architectures.
-
-	  See https://fastcrypto.org/vmac for further information.
-
 config CRYPTO_WP512
 	tristate "Whirlpool"
 	select CRYPTO_HASH
@@ -1116,25 +1098,6 @@ config CRYPTO_CRC32
 
 	  Used by RoCEv2 and f2fs.
 
-config CRYPTO_CRCT10DIF
-	tristate "CRCT10DIF"
-	select CRYPTO_HASH
-	help
-	  CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
-	  CRC algorithm used by the SCSI Block Commands standard.
-
-config CRYPTO_CRC64_ROCKSOFT
-	tristate "CRC64 based on Rocksoft Model algorithm"
-	depends on CRC64
-	select CRYPTO_HASH
-	help
-	  CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm
-
-	  Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY)
-
-	  See https://zlib.net/crc_v3.txt
-
 endmenu
 
 menu "Compression"
@@ -1323,7 +1286,7 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
 config CRYPTO_JITTERENTROPY_OSR
 	int "CPU Jitter RNG Oversampling Rate"
 	range 1 15
-	default 1
+	default 3
 	help
 	  The Jitter RNG allows the specification of an oversampling rate (OSR).
 	  The Jitter RNG operation requires a fixed amount of timing
@@ -1439,7 +1402,6 @@ config CRYPTO_USER_API_AEAD
 	depends on NET
 	select CRYPTO_AEAD
 	select CRYPTO_SKCIPHER
-	select CRYPTO_NULL
 	select CRYPTO_USER_API
 	help
 	  Enable the userspace interface for AEAD cipher algorithms.
@@ -1456,26 +1418,6 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE
 	  already been phased out from internal use by the kernel, and are
 	  only useful for userspace clients that still rely on them.
 
-config CRYPTO_STATS
-	bool "Crypto usage statistics"
-	depends on CRYPTO_USER
-	help
-	  Enable the gathering of crypto stats.
-
-	  Enabling this option reduces the performance of the crypto API.  It
-	  should only be enabled when there is actually a use case for it.
-
-	  This collects data sizes, numbers of requests, and numbers
-	  of errors processed by:
-	  - AEAD ciphers (encrypt, decrypt)
-	  - asymmetric key ciphers (encrypt, decrypt, verify, sign)
-	  - symmetric key ciphers (encrypt, decrypt)
-	  - compression algorithms (compress, decompress)
-	  - hash algorithms (hash)
-	  - key-agreement protocol primitives (setsecret, generate
-	    public key, compute shared secret)
-	  - RNG (generate, seed)
-
 endmenu
 
 config CRYPTO_HASH_INFO
@@ -1514,5 +1456,6 @@ endif
 source "drivers/crypto/Kconfig"
 source "crypto/asymmetric_keys/Kconfig"
 source "certs/Kconfig"
+source "crypto/krb5/Kconfig"
 
 endif	# if CRYPTO
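[Editor's note: the old CRYPTO_MANAGER_DISABLE_TESTS/CRYPTO_MANAGER_EXTRA_TESTS pair expressed the testing policy inversely to the new options. A hedged .config fragment for the "production kernel" case described in the CRYPTO_SELFTESTS help might look like the following; for development builds one would additionally set CONFIG_CRYPTO_SELFTESTS_FULL=y. This fragment is an illustration, not part of the diff.]

# Production kernel: run the boot-time/registration-time self-tests,
# but skip the full (slow) set. CRYPTO_SELFTESTS is gated on EXPERT.
CONFIG_EXPERT=y
CONFIG_CRYPTO_SELFTESTS=y
# CONFIG_CRYPTO_SELFTESTS_FULL is not set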
diff --git a/crypto/Makefile b/crypto/Makefile
index 408f0a1f9ab9..017df3a2e4bb 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o
 
 obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
@@ -20,6 +20,9 @@ crypto_skcipher-y += lskcipher.o
 crypto_skcipher-y += skcipher.o
 
 obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+obj-$(CONFIG_CRYPTO_SKCIPHER2) += bpf_crypto_skcipher.o
+endif
 
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
@@ -31,6 +34,7 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 obj-$(CONFIG_CRYPTO_SIG2) += sig.o
 obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o
 
 dh_generic-y := dh.o
 dh_generic-y += dh_helper.o
@@ -45,19 +49,14 @@ rsa_generic-y += rsaprivkey.asn1.o
 rsa_generic-y += rsa.o
 rsa_generic-y += rsa_helper.o
 rsa_generic-y += rsa-pkcs1pad.o
+rsa_generic-y += rsassa-pkcs1.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
-$(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h
-$(obj)/sm2.o: $(obj)/sm2signature.asn1.h
-
-sm2_generic-y += sm2signature.asn1.o
-sm2_generic-y += sm2.o
-
-obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
-
 $(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
-$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
+$(obj)/ecdsa-x962.o: $(obj)/ecdsasignature.asn1.h
 ecdsa_generic-y += ecdsa.o
+ecdsa_generic-y += ecdsa-x962.o
+ecdsa_generic-y += ecdsa-p1363.o
 ecdsa_generic-y += ecdsasignature.asn1.o
 obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o
@@ -69,21 +68,18 @@ cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
-crypto_user-y := crypto_user_base.o
-crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o
 obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
-obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
 obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
 obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
-obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
+obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
+CFLAGS_sha256.o += -DARCH=$(ARCH)
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
-obj-$(CONFIG_CRYPTO_SM3) += sm3.o
 obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
 obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
@@ -99,7 +95,6 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
 obj-$(CONFIG_CRYPTO_CTR) += ctr.o
 obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
 obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
-obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
 obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
 obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
 obj-$(CONFIG_CRYPTO_GCM) += gcm.o
@@ -153,15 +148,18 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
 obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
 obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
-obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o
+CFLAGS_chacha.o += -DARCH=$(ARCH)
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
-obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
-obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
+obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o
+crc32c-cryptoapi-y := crc32c.o
+CFLAGS_crc32c.o += -DARCH=$(ARCH)
+obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o
+crc32-cryptoapi-y := crc32.o
+CFLAGS_crc32.o += -DARCH=$(ARCH)
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
@@ -176,7 +174,7 @@ KASAN_SANITIZE_jitterentropy.o = n
 UBSAN_SANITIZE_jitterentropy.o = n
 jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
 obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
-obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
@@ -215,3 +213,5 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
 # Key derivation function
 #
 obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5/
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 1c682810a484..be28cbfd22e3 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,21 +8,36 @@
  */
 
 #include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
 #include <linux/cryptouser.h>
-#include <linux/errno.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/workqueue.h>
 #include <net/netlink.h>
 
 #include "compress.h"
 
 struct crypto_scomp;
 
+enum {
+	ACOMP_WALK_SLEEP = 1 << 0,
+	ACOMP_WALK_SRC_LINEAR = 1 << 1,
+	ACOMP_WALK_DST_LINEAR = 1 << 2,
+};
+
 static const struct crypto_type crypto_acomp_type;
 
+static void acomp_reqchain_done(void *data, int err);
+
 static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
 {
 	return container_of(alg, struct acomp_alg, calg.base);
@@ -58,29 +73,54 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
 	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
 	struct acomp_alg *alg = crypto_acomp_alg(acomp);
 
-	alg->exit(acomp);
+	if (alg->exit)
+		alg->exit(acomp);
+
+	if (acomp_is_async(acomp))
+		crypto_free_acomp(crypto_acomp_fb(acomp));
 }
 
 static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
 	struct acomp_alg *alg = crypto_acomp_alg(acomp);
+	struct crypto_acomp *fb = NULL;
+	int err;
 
 	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
 		return crypto_init_scomp_ops_async(tfm);
 
+	if (acomp_is_async(acomp)) {
+		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
+					CRYPTO_ALG_ASYNC);
+		if (IS_ERR(fb))
+			return PTR_ERR(fb);
+
+		err = -EINVAL;
+		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
+			goto out_free_fb;
+
+		tfm->fb = crypto_acomp_tfm(fb);
+	}
+
 	acomp->compress = alg->compress;
 	acomp->decompress = alg->decompress;
-	acomp->dst_free = alg->dst_free;
-	acomp->reqsize = alg->reqsize;
+	acomp->reqsize = alg->base.cra_reqsize;
 
-	if (alg->exit)
-		acomp->base.exit = crypto_acomp_exit_tfm;
+	acomp->base.exit = crypto_acomp_exit_tfm;
 
-	if (alg->init)
-		return alg->init(acomp);
+	if (!alg->init)
+		return 0;
+
+	err = alg->init(acomp);
+	if (err)
+		goto out_free_fb;
 
 	return 0;
+
+out_free_fb:
+	crypto_free_acomp(fb);
+	return err;
 }
 
 static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
@@ -93,32 +133,6 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
 	return extsize;
 }
 
-static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
-					     struct crypto_alg *alg)
-{
-	struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
-	struct crypto_istat_compress *istat = comp_get_stat(calg);
-	struct crypto_stat_compress racomp;
-
-	memset(&racomp, 0, sizeof(racomp));
-
-	strscpy(racomp.type, "acomp", sizeof(racomp.type));
-	racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
-	racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
-	racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
-	racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
-	racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	return __crypto_acomp_report_stat(skb, alg);
-}
-#endif
-
 static const struct crypto_type crypto_acomp_type = {
 	.extsize = crypto_acomp_extsize,
 	.init_tfm = crypto_acomp_init_tfm,
@@ -128,13 +142,11 @@ static const struct crypto_type crypto_acomp_type = {
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_acomp_report,
 #endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_acomp_report_stat,
-#endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
 	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
 	.tfmsize = offsetof(struct crypto_acomp, base),
+	.algsize = offsetof(struct acomp_alg, base),
 };
 
 struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
@@ -152,45 +164,152 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
 
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
+{
+	struct acomp_req_chain *state = &req->chain;
+
+	state->compl = req->base.complete;
+	state->data = req->base.data;
+	req->base.complete = cplt;
+	req->base.data = state;
+}
+
+static void acomp_restore_req(struct acomp_req *req)
 {
-	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
-	struct acomp_req *req;
+	struct acomp_req_chain *state = req->base.data;
 
-	req = __acomp_request_alloc(acomp);
-	if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
-		return crypto_acomp_scomp_alloc_ctx(req);
+	req->base.complete = state->compl;
+	req->base.data = state->data;
+}
 
-	return req;
+static void acomp_reqchain_virt(struct acomp_req *req)
+{
+	struct acomp_req_chain *state = &req->chain;
+	unsigned int slen = req->slen;
+	unsigned int dlen = req->dlen;
+
+	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
+		acomp_request_set_src_dma(req, state->src, slen);
+	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
+		acomp_request_set_dst_dma(req, state->dst, dlen);
 }
-EXPORT_SYMBOL_GPL(acomp_request_alloc);
 
-void acomp_request_free(struct acomp_req *req)
+static void acomp_virt_to_sg(struct acomp_req *req)
 {
-	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+	struct acomp_req_chain *state = &req->chain;
 
-	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
-		crypto_acomp_scomp_free_ctx(req);
+	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+					  CRYPTO_ACOMP_REQ_DST_VIRT);
 
-	if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
-		acomp->dst_free(req->dst);
-		req->dst = NULL;
+	if (acomp_request_src_isvirt(req)) {
+		unsigned int slen = req->slen;
+		const u8 *svirt = req->svirt;
+
+		state->src = svirt;
+		sg_init_one(&state->ssg, svirt, slen);
+		acomp_request_set_src_sg(req, &state->ssg, slen);
+	}
+
+	if (acomp_request_dst_isvirt(req)) {
+		unsigned int dlen = req->dlen;
+		u8 *dvirt = req->dvirt;
+
+		state->dst = dvirt;
+		sg_init_one(&state->dsg, dvirt, dlen);
+		acomp_request_set_dst_sg(req, &state->dsg, dlen);
 	}
+}
+
+static int acomp_do_nondma(struct acomp_req *req, bool comp)
+{
+	ACOMP_FBREQ_ON_STACK(fbreq, req);
+	int err;
+
+	if (comp)
+		err = crypto_acomp_compress(fbreq);
+	else
+		err = crypto_acomp_decompress(fbreq);
+
+	req->dlen = fbreq->dlen;
+	return err;
+}
+
+static int acomp_do_one_req(struct acomp_req *req, bool comp)
+{
+	if (acomp_request_isnondma(req))
+		return acomp_do_nondma(req, comp);
+
+	acomp_virt_to_sg(req);
+	return comp ? crypto_acomp_reqtfm(req)->compress(req) :
+		      crypto_acomp_reqtfm(req)->decompress(req);
+}
+
+static int acomp_reqchain_finish(struct acomp_req *req, int err)
+{
+	acomp_reqchain_virt(req);
+	acomp_restore_req(req);
+	return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+	struct acomp_req *req = data;
+	crypto_completion_t compl;
+
+	compl = req->chain.compl;
+	data = req->chain.data;
+
+	if (err == -EINPROGRESS)
+		goto notify;
+
+	err = acomp_reqchain_finish(req, err);
+
+notify:
+	compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req, bool comp)
+{
+	int err;
+
+	acomp_save_req(req, acomp_reqchain_done);
+
+	err = acomp_do_one_req(req, comp);
+	if (err == -EBUSY || err == -EINPROGRESS)
+		return err;
+
+	return acomp_reqchain_finish(req, err);
+}
+
+int crypto_acomp_compress(struct acomp_req *req)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+		return -EAGAIN;
+	if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+		return crypto_acomp_reqtfm(req)->compress(req);
+	return acomp_do_req_chain(req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
 
-	__acomp_request_free(req);
+	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+		return -EAGAIN;
+	if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+		return crypto_acomp_reqtfm(req)->decompress(req);
+	return acomp_do_req_chain(req, false);
 }
-EXPORT_SYMBOL_GPL(acomp_request_free);
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
 
 void comp_prepare_alg(struct comp_alg_common *alg)
 {
-	struct crypto_istat_compress *istat = comp_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
 }
 
 int crypto_register_acomp(struct acomp_alg *alg)
@@ -241,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
 
+static void acomp_stream_workfn(struct work_struct *work)
+{
+	struct crypto_acomp_streams *s =
+		container_of(work, struct crypto_acomp_streams, stream_work);
+	struct crypto_acomp_stream __percpu *streams = s->streams;
+	int cpu;
+
+	for_each_cpu(cpu, &s->stream_want) {
+		struct crypto_acomp_stream *ps;
+		void *ctx;
+
+		ps = per_cpu_ptr(streams, cpu);
+		if (ps->ctx)
+			continue;
+
+		ctx = s->alloc_ctx();
+		if (IS_ERR(ctx))
+			break;
+
+		spin_lock_bh(&ps->lock);
+		ps->ctx = ctx;
+		spin_unlock_bh(&ps->lock);
+
+		cpumask_clear_cpu(cpu, &s->stream_want);
+	}
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+	struct crypto_acomp_stream __percpu *streams = s->streams;
+	void (*free_ctx)(void *);
+	int i;
+
+	s->streams = NULL;
+	if (!streams)
+		return;
+
+	cancel_work_sync(&s->stream_work);
+	free_ctx = s->free_ctx;
+
+	for_each_possible_cpu(i) {
+		struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+		if (!ps->ctx)
+			continue;
+
+		free_ctx(ps->ctx);
+	}
+
+	free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+	struct crypto_acomp_stream __percpu *streams;
+	struct crypto_acomp_stream *ps;
+	unsigned int i;
+	void *ctx;
+
+	if (s->streams)
+		return 0;
+
+	streams = alloc_percpu(struct crypto_acomp_stream);
+	if (!streams)
+		return -ENOMEM;
+
+	ctx = s->alloc_ctx();
+	if (IS_ERR(ctx)) {
+		free_percpu(streams);
+		return PTR_ERR(ctx);
+	}
+
+	i = cpumask_first(cpu_possible_mask);
+	ps = per_cpu_ptr(streams, i);
+	ps->ctx = ctx;
+
+	for_each_possible_cpu(i) {
+		ps = per_cpu_ptr(streams, i);
+		spin_lock_init(&ps->lock);
+	}
+
+	s->streams = streams;
+
+	INIT_WORK(&s->stream_work, acomp_stream_workfn);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+	struct crypto_acomp_streams *s) __acquires(stream)
+{
+	struct crypto_acomp_stream __percpu *streams = s->streams;
+	int cpu = raw_smp_processor_id();
+	struct crypto_acomp_stream *ps;
+
+	ps = per_cpu_ptr(streams, cpu);
+	spin_lock_bh(&ps->lock);
+	if (likely(ps->ctx))
+		return ps;
+	spin_unlock(&ps->lock);
+
+	cpumask_set_cpu(cpu, &s->stream_want);
+	schedule_work(&s->stream_work);
+
+	ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+	spin_lock(&ps->lock);
+	return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+	walk->slen -= used;
+	if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+		scatterwalk_advance(&walk->in, used);
+	else
+		scatterwalk_done_src(&walk->in, used);
+
+	if ((walk->flags & ACOMP_WALK_SLEEP))
+		cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+	walk->dlen -= used;
+	if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+		scatterwalk_advance(&walk->out, used);
+	else
+		scatterwalk_done_dst(&walk->out, used);
+
+	if ((walk->flags & ACOMP_WALK_SLEEP))
+		cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+	unsigned int slen = walk->slen;
+	unsigned int max = UINT_MAX;
+
+	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+		max = PAGE_SIZE;
+	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+		walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+					   walk->in.offset);
+		return min(slen, max);
+	}
+
+	return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+	unsigned int dlen = walk->dlen;
+	unsigned int max = UINT_MAX;
+
+	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+		max = PAGE_SIZE;
+	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+		walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+					    walk->out.offset);
+		return min(dlen, max);
+	}
+
+	return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+		    struct acomp_req *__restrict req, bool atomic)
+{
+	struct scatterlist *src = req->src;
+	struct scatterlist *dst = req->dst;
+
+	walk->slen = req->slen;
+	walk->dlen = req->dlen;
+
+	if (!walk->slen || !walk->dlen)
+		return -EINVAL;
+
+	walk->flags = 0;
+	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+		walk->flags |= ACOMP_WALK_SLEEP;
+	if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+		walk->flags |= ACOMP_WALK_SRC_LINEAR;
+	if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+		walk->flags |= ACOMP_WALK_DST_LINEAR;
+
+	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+		walk->in.sg = (void *)req->svirt;
+		walk->in.offset = 0;
+	} else
+		scatterwalk_start(&walk->in, src);
+	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+		walk->out.sg = (void *)req->dvirt;
+		walk->out.offset = 0;
+	} else
+		scatterwalk_start(&walk->out, dst);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+				      size_t total, gfp_t gfp)
+{
+	struct acomp_req *nreq;
+
+	nreq = container_of(crypto_request_clone(&req->base, total, gfp),
+			    struct acomp_req, base);
+	if (nreq == req)
+		return req;
+
+	if (req->src == &req->chain.ssg)
+		nreq->src = &nreq->chain.ssg;
+	if (req->dst == &req->chain.dsg)
+		nreq->dst = &nreq->chain.dsg;
+	return nreq;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Asynchronous compression type");
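[Editor's note: the per-CPU stream helpers added above give scomp-style drivers one unit of working memory per CPU, allocated lazily from a workqueue. The sketch below shows the intended call pattern under stated assumptions: my_alloc_ctx()/my_free_ctx() are hypothetical driver callbacks, and crypto_acomp_unlock_stream_bh() is assumed to be the matching unlock helper from the internal acompress header.]

/*
 * Hedged usage sketch for crypto_acomp_alloc_streams() and
 * crypto_acomp_lock_stream_bh(); not code from this series.
 */
static struct crypto_acomp_streams my_streams = {
	.alloc_ctx = my_alloc_ctx,	/* returns working memory or ERR_PTR */
	.free_ctx = my_free_ctx,
};

static int my_compress_one(void)
{
	struct crypto_acomp_stream *ps;
	int err;

	err = crypto_acomp_alloc_streams(&my_streams);
	if (err)
		return err;

	/*
	 * Lock this CPU's stream with BHs disabled; if its context has
	 * not been allocated yet, the helper queues the allocation work
	 * and falls back to the first possible CPU's stream.
	 */
	ps = crypto_acomp_lock_stream_bh(&my_streams);
	/* ... use ps->ctx as scratch memory for one request ... */
	crypto_acomp_unlock_stream_bh(ps);	/* assumed unlock helper */

	return 0;
}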
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 60f3883b736a..a6bca877c3c7 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -639,11 +639,11 @@ static void __exit adiantum_module_exit(void)
 	crypto_unregister_template(&adiantum_tmpl);
 }
 
-subsys_initcall(adiantum_module_init);
+module_init(adiantum_module_init);
 module_exit(adiantum_module_exit);
 
 MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
 MODULE_ALIAS_CRYPTO("adiantum");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/aead.c b/crypto/aead.c
index 54906633566a..5d14b775036e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -16,19 +16,11 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
+#include <linux/string_choices.h>
 #include <net/netlink.h>
 
 #include "internal.h"
 
-static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-	return &alg->stat;
-#else
-	return NULL;
-#endif
-}
-
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 			    unsigned int keylen)
 {
@@ -45,8 +37,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 	memcpy(alignbuffer, key, keylen);
 	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
+	kfree_sensitive(buffer);
 	return ret;
 }
 
@@ -90,62 +81,28 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
-static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
-{
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
-		return err;
-
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&istat->err_cnt);
-
-	return err;
-}
-
 int crypto_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct aead_alg *alg = crypto_aead_alg(aead);
-	struct crypto_istat_aead *istat;
-	int ret;
-
-	istat = aead_get_stat(alg);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
-	}
 
 	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else
-		ret = alg->encrypt(req);
+		return -ENOKEY;
 
-	return crypto_aead_errstat(istat, ret);
+	return crypto_aead_alg(aead)->encrypt(req);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
 
 int crypto_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct aead_alg *alg = crypto_aead_alg(aead);
-	struct crypto_istat_aead *istat;
-	int ret;
-
-	istat = aead_get_stat(alg);
-
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		atomic64_inc(&istat->encrypt_cnt);
-		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
-	}
 
 	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else if (req->cryptlen < crypto_aead_authsize(aead))
-		ret = -EINVAL;
-	else
-		ret = alg->decrypt(req);
+		return -ENOKEY;
+
+	if (req->cryptlen < crypto_aead_authsize(aead))
+		return -EINVAL;
 
-	return crypto_aead_errstat(istat, ret);
+	return crypto_aead_alg(aead)->decrypt(req);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
 
@@ -200,8 +157,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
 	struct aead_alg *aead = container_of(alg, struct aead_alg, base);
 
 	seq_printf(m, "type         : aead\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-					     "yes" : "no");
+	seq_printf(m, "async        : %s\n",
+		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
 	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
 	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
 	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
@@ -215,26 +172,6 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
 	aead->free(aead);
 }
 
-static int __maybe_unused crypto_aead_report_stat(
-	struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct aead_alg *aead = container_of(alg, struct aead_alg, base);
-	struct crypto_istat_aead *istat = aead_get_stat(aead);
-	struct crypto_stat_aead raead;
-
-	memset(&raead, 0, sizeof(raead));
-
-	strscpy(raead.type, "aead", sizeof(raead.type));
-
-	raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
-	raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
-	raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
-	raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
-	raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
 static const struct crypto_type crypto_aead_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_aead_init_tfm,
@@ -245,13 +182,11 @@ static const struct crypto_type crypto_aead_type = {
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
 	.report = crypto_aead_report,
 #endif
-#ifdef CONFIG_CRYPTO_STATS
-	.report_stat = crypto_aead_report_stat,
-#endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
 	.type = CRYPTO_ALG_TYPE_AEAD,
 	.tfmsize = offsetof(struct crypto_aead, base),
+	.algsize = offsetof(struct aead_alg, base),
 };
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn,
@@ -277,7 +212,6 @@ EXPORT_SYMBOL_GPL(crypto_has_aead);
 
 static int aead_prepare_alg(struct aead_alg *alg)
 {
-	struct crypto_istat_aead *istat = aead_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -291,9 +225,6 @@ static int aead_prepare_alg(struct aead_alg *alg)
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
 
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		memset(istat, 0, sizeof(*istat));
-
 	return 0;
 }
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index c4f1bfa1d04f..ca80d861345d 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
 
 	scatterwalk_start(&walk, sg_src);
 	while (assoclen != 0) {
-		unsigned int size = scatterwalk_clamp(&walk, assoclen);
+		unsigned int size = scatterwalk_next(&walk, assoclen);
+		const u8 *src = walk.addr;
 		unsigned int left = size;
-		void *mapped = scatterwalk_map(&walk);
-		const u8 *src = (const u8 *)mapped;
 
 		if (pos + size >= AEGIS_BLOCK_SIZE) {
 			if (pos > 0) {
@@ -308,9 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
 		pos += left;
 		assoclen -= size;
 
-		scatterwalk_unmap(mapped);
-		scatterwalk_advance(&walk, size);
-		scatterwalk_done(&walk, 0, assoclen);
+		scatterwalk_done_src(&walk, size);
 	}
 
 	if (pos > 0) {
@@ -323,8 +320,9 @@ static __always_inline
 int crypto_aegis128_process_crypt(struct aegis_state *state,
 				  struct skcipher_walk *walk,
 				  void (*crypt)(struct aegis_state *state,
-						u8 *dst, const u8 *src,
-						unsigned int size))
+						u8 *dst,
+						const u8 *src,
+						unsigned int size))
 {
 	int err = 0;
 
@@ -515,7 +513,6 @@ static struct aead_alg crypto_aegis128_alg_generic = {
 
 	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct aegis_ctx),
-	.base.cra_alignmask	= 0,
 	.base.cra_priority	= 100,
 	.base.cra_name		= "aegis128",
 	.base.cra_driver_name	= "aegis128-generic",
@@ -534,7 +531,6 @@ static struct aead_alg crypto_aegis128_alg_simd = {
 
 	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct aegis_ctx),
-	.base.cra_alignmask	= 0,
 	.base.cra_priority	= 200,
 	.base.cra_name		= "aegis128",
 	.base.cra_driver_name	= "aegis128-simd",
@@ -570,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void)
 	crypto_unregister_aead(&crypto_aegis128_alg_generic);
 }
 
-subsys_initcall(crypto_aegis128_module_init);
+module_init(crypto_aegis128_module_init);
 module_exit(crypto_aegis128_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 666474b81c6a..85d2e78c8ef2 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -54,7 +54,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 static inline u8 byte(const u32 x, const unsigned n)
 {
@@ -1311,7 +1311,7 @@ static void __exit aes_fini(void)
 	crypto_unregister_alg(&aes_alg);
 }
 
-subsys_initcall(aes_init);
+module_init(aes_init);
 module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 68cc9290cabe..0da7c1ac778a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -407,7 +407,8 @@ unlock:
 	return err;
 }
 
-int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
+int af_alg_accept(struct sock *sk, struct socket *newsock,
+		  struct proto_accept_arg *arg)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	const struct af_alg_type *type;
@@ -422,7 +423,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
 	if (!type)
 		goto unlock;
 
-	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, arg->kern);
 	err = -ENOMEM;
 	if (!sk2)
 		goto unlock;
@@ -468,10 +469,10 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_accept);
 
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
-		      bool kern)
+static int alg_accept(struct socket *sock, struct socket *newsock,
+		      struct proto_accept_arg *arg)
 {
-	return af_alg_accept(sock->sk, newsock, kern);
+	return af_alg_accept(sock->sk, newsock, arg);
 }
 
 static const struct proto_ops alg_proto_ops = {
@@ -847,7 +848,7 @@ void af_alg_wmem_wakeup(struct sock *sk)
 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
 							   EPOLLRDNORM |
 							   EPOLLRDBAND);
-	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
@@ -914,7 +915,7 @@ static void af_alg_data_wakeup(struct sock *sk)
 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
 							   EPOLLRDNORM |
 							   EPOLLRDBAND);
-	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	rcu_read_unlock();
 }
 
@@ -1316,5 +1317,6 @@ static void __exit af_alg_exit(void)
 module_init(af_alg_init);
 module_exit(af_alg_exit);
 
+MODULE_DESCRIPTION("Crypto userspace interface");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(AF_ALG);
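[Editor's note: the seq_printf() conversions in aead.c above (and in ahash.c below) replace an open-coded ternary with str_yes_no() from <linux/string_choices.h>. For reference, that helper is, to the best of my knowledge, just the same ternary behind a name; a minimal sketch:]

/* What str_yes_no(v) expands to, per <linux/string_choices.h>. */
static inline const char *str_yes_no_sketch(bool v)
{
	return v ? "yes" : "no";
}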
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 0ac83f7f701d..bc84a07c924c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -16,31 +16,154 @@
 #include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
+#include <linux/string_choices.h>
 #include <net/netlink.h>
 
 #include "hash.h"
 
 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
 
-static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
+struct crypto_hash_walk {
+	const char *data;
+
+	unsigned int offset;
+	unsigned int flags;
+
+	struct page *pg;
+	unsigned int entrylen;
+
+	unsigned int total;
+	struct scatterlist *sg;
+};
+
+static int ahash_def_finup(struct ahash_request *req);
+
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+				 int (*finish)(struct ahash_request *, int))
+{
+	struct ahash_request *areq = data;
+	crypto_completion_t compl;
+
+	compl = areq->saved_complete;
+	data = areq->saved_data;
+	if (err == -EINPROGRESS)
+		goto out;
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = finish(areq, err);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return;
+
+out:
+	compl(data, err);
+}
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
 {
-	return hash_get_stat(&alg->halg);
+	unsigned int offset = walk->offset;
+	unsigned int nbytes = min(walk->entrylen,
+				  ((unsigned int)(PAGE_SIZE)) - offset);
+
+	walk->data = kmap_local_page(walk->pg);
+	walk->data += offset;
+	walk->entrylen -= nbytes;
+	return nbytes;
 }
 
-static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 {
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+	struct scatterlist *sg;
+
+	sg = walk->sg;
+	walk->offset = sg->offset;
+	walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
+	walk->offset = offset_in_page(walk->offset);
+	walk->entrylen = sg->length;
+
+	if (walk->entrylen > walk->total)
+		walk->entrylen = walk->total;
+	walk->total -= walk->entrylen;
+
+	return hash_walk_next(walk);
+}
+
+static int crypto_hash_walk_first(struct ahash_request *req,
+				  struct crypto_hash_walk *walk)
+{
+	walk->total = req->nbytes;
+	walk->entrylen = 0;
+
+	if (!walk->total)
+		return 0;
+
+	walk->flags = req->base.flags;
+
+	if (ahash_request_isvirt(req)) {
+		walk->data = req->svirt;
+		walk->total = 0;
+		return req->nbytes;
+	}
+
+	walk->sg = req->src;
+
+	return hash_walk_new_entry(walk);
+}
+
+static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
 		return err;
 
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&ahash_get_stat(alg)->err_cnt);
+	walk->data -= walk->offset;
 
-	return err;
+	kunmap_local(walk->data);
+	crypto_yield(walk->flags);
+
+	if (err)
+		return err;
+
+	if (walk->entrylen) {
+		walk->offset = 0;
+		walk->pg++;
+		return hash_walk_next(walk);
+	}
+
+	if (!walk->total)
+		return 0;
+
+	walk->sg = sg_next(walk->sg);
+
+	return hash_walk_new_entry(walk);
+}
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+	return !(walk->entrylen | walk->total);
 }
 
 /*
@@ -100,21 +223,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	unsigned int nbytes = req->nbytes;
 	struct scatterlist *sg;
 	unsigned int offset;
+	struct page *page;
+	const u8 *data;
 	int err;
 
-	if (nbytes &&
-	    (sg = req->src, offset = sg->offset,
-	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
-		void *data;
+	data = req->svirt;
+	if (!nbytes || ahash_request_isvirt(req))
+		return crypto_shash_digest(desc, data, nbytes, req->result);
+
+	sg = req->src;
+	if (nbytes > sg->length)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
 
-		data = kmap_local_page(sg_page(sg));
-		err = crypto_shash_digest(desc, data + offset, nbytes,
-					  req->result);
-		kunmap_local(data);
-	} else
-		err = crypto_shash_init(desc) ?:
-		      shash_ahash_finup(req, desc);
+	page = sg_page(sg);
+	offset = sg->offset;
+	data = lowmem_page_address(page) + offset;
+	if (!IS_ENABLED(CONFIG_HIGHMEM))
+		return crypto_shash_digest(desc, data, nbytes, req->result);
 
+	page = nth_page(page, offset >> PAGE_SHIFT);
+	offset = offset_in_page(offset);
+
+	if (nbytes > (unsigned int)PAGE_SIZE - offset)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
+
+	data = kmap_local_page(page);
+	err = crypto_shash_digest(desc, data + offset, nbytes,
+				  req->result);
+	kunmap_local(data);
 	return err;
 }
 EXPORT_SYMBOL_GPL(shash_ahash_digest);
@@ -148,82 +286,10 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
 	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
 				    CRYPTO_TFM_NEED_KEY);
-	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
 
 	return 0;
 }
 
-static int hash_walk_next(struct crypto_hash_walk *walk)
-{
-	unsigned int offset = walk->offset;
-	unsigned int nbytes = min(walk->entrylen,
-				  ((unsigned int)(PAGE_SIZE)) - offset);
-
-	walk->data = kmap_local_page(walk->pg);
-	walk->data += offset;
-	walk->entrylen -= nbytes;
-	return nbytes;
-}
-
-static int hash_walk_new_entry(struct crypto_hash_walk *walk)
-{
-	struct scatterlist *sg;
-
-	sg = walk->sg;
-	walk->offset = sg->offset;
-	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
-	walk->offset = offset_in_page(walk->offset);
-	walk->entrylen = sg->length;
-
-	if (walk->entrylen > walk->total)
-		walk->entrylen = walk->total;
-	walk->total -= walk->entrylen;
-
-	return hash_walk_next(walk);
-}
-
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
-{
-	walk->data -= walk->offset;
-
-	kunmap_local(walk->data);
-	crypto_yield(walk->flags);
-
-	if (err)
-		return err;
-
-	if (walk->entrylen) {
-		walk->offset = 0;
-		walk->pg++;
-		return hash_walk_next(walk);
-	}
-
-	if (!walk->total)
-		return 0;
-
-	walk->sg = sg_next(walk->sg);
-
-	return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
-
-int crypto_hash_walk_first(struct ahash_request *req,
-			   struct crypto_hash_walk *walk)
-{
-	walk->total = req->nbytes;
-
-	if (!walk->total) {
-		walk->entrylen = 0;
-		return 0;
-	}
-
-	walk->sg = req->src;
-	walk->flags = req->base.flags;
-
-	return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-
 static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
 			  unsigned int keylen)
 {
@@ -256,6 +322,9 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	int err;
 
 	err = alg->setkey(tfm, key, keylen);
+	if (!err && crypto_ahash_need_fallback(tfm))
+		err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+					  key, keylen);
 	if (unlikely(err)) {
 		ahash_set_needkey(tfm, alg);
 		return err;
@@ -266,6 +335,49 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
+static int ahash_do_req_chain(struct ahash_request *req,
+			      int (*const *op)(struct ahash_request *req))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	int err;
+
+	if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+		return (*op)(req);
+
+	if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+		return -ENOSYS;
+
+	{
+		u8 state[HASH_MAX_STATESIZE];
+
+		if (op == &crypto_ahash_alg(tfm)->digest) {
+			ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+			err = crypto_ahash_digest(req);
+			goto out_no_state;
+		}
+
+		err = crypto_ahash_export(req, state);
+		ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+		err = err ?: crypto_ahash_import(req, state);
+
+		if (op == &crypto_ahash_alg(tfm)->finup) {
+			err = err ?: crypto_ahash_finup(req);
+			goto out_no_state;
+		}
+
+		err = err ?:
+		      crypto_ahash_update(req) ?:
+		      crypto_ahash_export(req, state);
+
+		ahash_request_set_tfm(req, tfm);
+		return err ?: crypto_ahash_import(req, state);
+
+out_no_state:
+		ahash_request_set_tfm(req, tfm);
+		return err;
+	}
+}
+
 int crypto_ahash_init(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -274,145 +386,204 @@ int crypto_ahash_init(struct ahash_request *req)
 		return crypto_shash_init(prepare_shash_desc(req, tfm));
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
+	if (crypto_ahash_block_only(tfm)) {
+		u8 *buf = ahash_request_ctx(req);
+
+		buf += crypto_ahash_reqsize(tfm) - 1;
+		*buf = 0;
+	}
 	return crypto_ahash_alg(tfm)->init(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_init);
 
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
-			  bool has_state)
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned int ds = crypto_ahash_digestsize(tfm);
-	struct ahash_request *subreq;
-	unsigned int subreq_size;
-	unsigned int reqsize;
-	u8 *result;
-	gfp_t gfp;
-	u32 flags;
-
-	subreq_size = sizeof(*subreq);
-	reqsize = crypto_ahash_reqsize(tfm);
-	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
-	subreq_size += reqsize;
-	subreq_size += ds;
-
-	flags = ahash_request_flags(req);
-	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
-	subreq = kmalloc(subreq_size, gfp);
-	if (!subreq)
-		return -ENOMEM;
-
-	ahash_request_set_tfm(subreq, tfm);
-	ahash_request_set_callback(subreq, flags, cplt, req);
-
-	result = (u8 *)(subreq + 1) + reqsize;
-
-	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
-
-	if (has_state) {
-		void *state;
-
-		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
-		if (!state) {
-			kfree(subreq);
-			return -ENOMEM;
-		}
-
-		crypto_ahash_export(req, state);
-		crypto_ahash_import(subreq, state);
-		kfree_sensitive(state);
-	}
-
-	req->priv = subreq;
+	req->saved_complete = req->base.complete;
+	req->saved_data = req->base.data;
+	req->base.complete = cplt;
+	req->base.data = req;
+}
 
-	return 0;
+static void ahash_restore_req(struct ahash_request *req)
+{
+	req->base.complete = req->saved_complete;
+	req->base.data = req->saved_data;
 }
 
-static void ahash_restore_req(struct ahash_request *req, int err)
+static int ahash_update_finish(struct ahash_request *req, int err)
 {
-	struct ahash_request *subreq = req->priv;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	bool nonzero = crypto_ahash_final_nonzero(tfm);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen;
+	u8 *buf;
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen) {
+		req->src = req->sg_head + 1;
+		if (sg_is_chain(req->src))
+			req->src = sg_chain_ptr(req->src);
+	}
+
+	req->nbytes += nonzero - blen;
 
-	if (!err)
-		memcpy(req->result, subreq->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+	blen = err < 0 ? 0 : err + nonzero;
+	if (ahash_request_isvirt(req))
+		memcpy(buf, req->svirt + req->nbytes - blen, blen);
+	else
+		memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+	*blenp = blen;
 
-	req->priv = NULL;
+	ahash_restore_req(req);
 
-	kfree_sensitive(subreq);
+	return err;
+}
+
+static void ahash_update_done(void *data, int err)
+{
+	ahash_op_done(data, err, ahash_update_finish);
 }
 
 int crypto_ahash_update(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
+	bool nonzero = crypto_ahash_final_nonzero(tfm);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen, err;
+	u8 *buf;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_update(req, ahash_request_ctx(req));
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
+	if (!crypto_ahash_block_only(tfm))
+		return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen + req->nbytes < bs + nonzero) {
+		if (ahash_request_isvirt(req))
+			memcpy(buf + blen, req->svirt, req->nbytes);
+		else
+			memcpy_from_sglist(buf + blen, req->src, 0,
+					   req->nbytes);
+
+		*blenp += req->nbytes;
+		return 0;
+	}
+
+	if (blen) {
+		memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+		sg_set_buf(req->sg_head, buf, blen);
+		if (req->src != req->sg_head + 1)
+			sg_chain(req->sg_head, 2, req->src);
+		req->src = req->sg_head;
+		req->nbytes += blen;
+	}
+	req->nbytes -= nonzero;
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
-	return crypto_ahash_errstat(alg, alg->update(req));
+	ahash_save_req(req, ahash_update_done);
+
+	err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return err;
+
+	return ahash_update_finish(req, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
 
-int crypto_ahash_final(struct ahash_request *req)
+static int ahash_finup_finish(struct ahash_request *req, int err)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
+	u8 *blenp = ahash_request_ctx(req);
+	int blen;
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+
+	if (blen) {
+		if (sg_is_last(req->src))
+			req->src = NULL;
+		else {
+			req->src = req->sg_head + 1;
+			if (sg_is_chain(req->src))
+				req->src = sg_chain_ptr(req->src);
+		}
+		req->nbytes -= blen;
+	}
 
-	if (likely(tfm->using_shash))
-		return crypto_shash_final(ahash_request_ctx(req), req->result);
+	ahash_restore_req(req);
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
-	return crypto_ahash_errstat(alg, alg->final(req));
+	return err;
+}
+
+static void ahash_finup_done(void *data, int err)
+{
+	ahash_op_done(data, err, ahash_finup_finish);
 }
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen, err;
+	u8 *buf;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_finup(req, ahash_request_ctx(req));
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
+	if (!crypto_ahash_alg(tfm)->finup)
+		return ahash_def_finup(req);
+	if (!crypto_ahash_block_only(tfm))
+		return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen) {
+		memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+		sg_set_buf(req->sg_head, buf, blen);
+		if (!req->src)
+			sg_mark_end(req->sg_head);
+		else if (req->src != req->sg_head + 1)
+			sg_chain(req->sg_head, 2, req->src);
+		req->src = req->sg_head;
+		req->nbytes += blen;
+	}
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = ahash_get_stat(alg);
+	ahash_save_req(req, ahash_finup_done);
 
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(req->nbytes, &istat->hash_tlen);
-	}
-	return crypto_ahash_errstat(alg, alg->finup(req));
+	err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return err;
+
+	return ahash_finup_finish(req, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
-	int err;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
-
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = ahash_get_stat(alg);
-
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(req->nbytes, &istat->hash_tlen);
-	}
-
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		err = -ENOKEY;
-	else
-		err = alg->digest(req);
-
-	return crypto_ahash_errstat(alg, err);
+		return -ENOKEY;
+	return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
@@ -423,74 +594,87 @@ static void ahash_def_finup_done2(void *data, int err)
 	if (err == -EINPROGRESS)
 		return;
 
-	ahash_restore_req(areq, err);
-
+	ahash_restore_req(areq);
 	ahash_request_complete(areq, err);
 }
 
 static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 {
-	struct ahash_request *subreq = req->priv;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (err)
 		goto out;
 
-	subreq->base.complete = ahash_def_finup_done2;
+	req->base.complete = ahash_def_finup_done2;
 
-	err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
+	err = crypto_ahash_alg(tfm)->final(req);
 	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 out:
-	ahash_restore_req(req, err);
+	ahash_restore_req(req);
 	return err;
 }
 
 static void ahash_def_finup_done1(void *data, int err)
 {
-	struct ahash_request *areq = data;
-	struct ahash_request *subreq;
-
-	if (err == -EINPROGRESS)
-		goto out;
-
-	subreq = areq->priv;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = ahash_def_finup_finish1(areq, err);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return;
-
-out:
-	ahash_request_complete(areq, err);
+	ahash_op_done(data, err, ahash_def_finup_finish1);
 }
 
 static int ahash_def_finup(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	int err;
 
-	err = ahash_save_req(req, ahash_def_finup_done1, true);
-	if (err)
-		return err;
+	ahash_save_req(req, ahash_def_finup_done1);
 
-	err = crypto_ahash_alg(tfm)->update(req->priv);
+	err = crypto_ahash_update(req);
 	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 	return ahash_def_finup_finish1(req, err);
 }
 
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (likely(tfm->using_shash))
+		return crypto_shash_export_core(ahash_request_ctx(req), out);
+	return crypto_ahash_alg(tfm)->export_core(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
 int crypto_ahash_export(struct ahash_request *req, void *out)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash))
 		return crypto_shash_export(ahash_request_ctx(req), out);
+	if (crypto_ahash_block_only(tfm)) {
+		unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+		unsigned int reqsize = crypto_ahash_reqsize(tfm);
+		unsigned int ss = crypto_ahash_statesize(tfm);
+		u8 *buf = ahash_request_ctx(req);
+
+		memcpy(out + ss - plen, buf + reqsize - plen, plen);
+	}
 	return crypto_ahash_alg(tfm)->export(req, out);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_export);
 
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (likely(tfm->using_shash))
+		return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+						in);
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+	return crypto_ahash_alg(tfm)->import_core(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
 int crypto_ahash_import(struct ahash_request *req, const void *in)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -499,6 +683,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
 		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
+	if (crypto_ahash_block_only(tfm)) {
+		unsigned int reqsize = crypto_ahash_reqsize(tfm);
+		u8 *buf = ahash_request_ctx(req);
+
+		buf[reqsize - 1] = 0;
+	}
 	return crypto_ahash_alg(tfm)->import(req, in);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_import);
@@ -508,25 +698,73 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
 	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
 	struct ahash_alg *alg = crypto_ahash_alg(hash);
 
-	alg->exit_tfm(hash);
+	if (alg->exit_tfm)
+		alg->exit_tfm(hash);
+	else if (tfm->__crt_alg->cra_exit)
+		tfm->__crt_alg->cra_exit(tfm);
+
+	if (crypto_ahash_need_fallback(hash))
+		crypto_free_ahash(crypto_ahash_fb(hash));
 }
 
 static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
 	struct ahash_alg *alg = crypto_ahash_alg(hash);
+	struct crypto_ahash *fb = NULL;
+	int err;
 
 	crypto_ahash_set_statesize(hash, alg->halg.statesize);
+	crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
 
 	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
 		return crypto_init_ahash_using_shash(tfm);
 
+	if (crypto_ahash_need_fallback(hash)) {
+		fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
+					CRYPTO_ALG_REQ_VIRT,
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_REQ_VIRT |
+					CRYPTO_AHASH_ALG_NO_EXPORT_CORE);
+		if (IS_ERR(fb))
+			return PTR_ERR(fb);
+
+		tfm->fb = crypto_ahash_tfm(fb);
+	}
+
 	ahash_set_needkey(hash, alg);
 
-	if (alg->exit_tfm)
-		tfm->exit = crypto_ahash_exit_tfm;
+	tfm->exit = crypto_ahash_exit_tfm;
+
+	if (alg->init_tfm)
+		err = alg->init_tfm(hash);
+	else if (tfm->__crt_alg->cra_init)
+		err = tfm->__crt_alg->cra_init(tfm);
+	else
+		return 0;
+
+	if (err)
+		goto out_free_sync_hash;
+
+	if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+				     MAX_SYNC_HASH_REQSIZE)
+		goto out_exit_tfm;
+
+	BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+	if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+		crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
+
+	return 0;
 
-	return alg->init_tfm ? alg->init_tfm(hash) : 0;
+out_exit_tfm:
+	if (alg->exit_tfm)
+		alg->exit_tfm(hash);
+	else if (tfm->__crt_alg->cra_exit)
+		tfm->__crt_alg->cra_exit(tfm);
+	err = -EINVAL;
+out_free_sync_hash:
+	crypto_free_ahash(fb);
+	return err;
 }
 
 static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
@@ -564,19 +802,13 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 {
 	seq_printf(m, "type         : ahash\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no"); + seq_printf(m, "async : %s\n", + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "digestsize : %u\n", __crypto_hash_alg_common(alg)->digestsize); } -static int __maybe_unused crypto_ahash_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - return crypto_hash_report_stat(skb, alg, "ahash"); -} - static const struct crypto_type crypto_ahash_type = { .extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, @@ -587,13 +819,11 @@ static const struct crypto_type crypto_ahash_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_ahash_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_ahash_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), + .algsize = offsetof(struct ahash_alg, halg.base), }; int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, @@ -618,7 +848,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_ahash); -static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; @@ -627,11 +857,13 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; } +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) { struct hash_alg_common *halg = crypto_hash_alg_common(hash); struct crypto_tfm *tfm = crypto_ahash_tfm(hash); + struct crypto_ahash *fb = NULL; struct crypto_ahash *nhash; struct ahash_alg *alg; int err; @@ -661,28 +893,52 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) err = PTR_ERR(shash); goto out_free_nhash; } + crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash; nhash->using_shash = true; *nctx = shash; return nhash; } + if (crypto_ahash_need_fallback(hash)) { + fb = crypto_clone_ahash(crypto_ahash_fb(hash)); + err = PTR_ERR(fb); + if (IS_ERR(fb)) + goto out_free_nhash; + + crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb); + } + err = -ENOSYS; alg = crypto_ahash_alg(hash); if (!alg->clone_tfm) - goto out_free_nhash; + goto out_free_fb; err = alg->clone_tfm(nhash, hash); if (err) - goto out_free_nhash; + goto out_free_fb; + + crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm; return nhash; +out_free_fb: + crypto_free_ahash(fb); out_free_nhash: crypto_free_ahash(nhash); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_clone_ahash); +static int ahash_default_export_core(struct ahash_request *req, void *out) +{ + return -ENOSYS; +} + +static int ahash_default_import_core(struct ahash_request *req, const void *in) +{ + return -ENOSYS; +} + static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; @@ -691,6 +947,13 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (alg->halg.statesize == 0) return -EINVAL; + if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize) + return -EINVAL; + + if (!(base->cra_flags & CRYPTO_ALG_ASYNC) && + base->cra_reqsize > MAX_SYNC_HASH_REQSIZE) + return -EINVAL; + err = hash_prepare_alg(&alg->halg); if (err) return err; @@ -698,11 +961,28 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; - if (!alg->finup) - alg->finup = ahash_def_finup; + if 
((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) & + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT)) + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + if (!alg->setkey) alg->setkey = ahash_nosetkey; + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + if (!alg->finup) + return -EINVAL; + + base->cra_reqsize += base->cra_blocksize + 1; + alg->halg.statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = ahash_default_export_core; + alg->import_core = ahash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + return 0; } @@ -770,5 +1050,42 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); +void ahash_request_free(struct ahash_request *req) +{ + if (unlikely(!req)) + return; + + if (!ahash_req_on_stack(req)) { + kfree(req); + return; + } + + ahash_request_zero(req); +} +EXPORT_SYMBOL_GPL(ahash_request_free); + +int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, + unsigned int len, u8 *out) +{ + HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm)); + int err; + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, data, out, len); + err = crypto_ahash_digest(req); + + ahash_request_zero(req); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_hash_digest); + +void ahash_free_singlespawn_instance(struct ahash_instance *inst) +{ + crypto_drop_spawn(ahash_instance_ctx(inst)); + kfree(inst); +} +EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 52813f0b19e4..a36f50c83827 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -20,6 +20,19 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + static int __maybe_unused crypto_akcipher_report( struct sk_buff *skb, struct crypto_alg *alg) { @@ -70,30 +83,6 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst) akcipher->free(akcipher); } -static int __maybe_unused crypto_akcipher_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg); - struct crypto_istat_akcipher *istat; - struct crypto_stat_akcipher rakcipher; - - istat = akcipher_get_stat(akcipher); - - memset(&rakcipher, 0, sizeof(rakcipher)); - - strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); - rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); - rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); - rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); - rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); - rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt); - rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt); - rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, - sizeof(rakcipher), &rakcipher); -} - static const struct crypto_type crypto_akcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_akcipher_init_tfm, @@ -104,13 +93,11 @@ static const struct crypto_type crypto_akcipher_type = { #if 
IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_akcipher_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_akcipher_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AKCIPHER, .tfmsize = offsetof(struct crypto_akcipher, base), + .algsize = offsetof(struct akcipher_alg, base), }; int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, @@ -131,15 +118,11 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); static void akcipher_prepare_alg(struct akcipher_alg *alg) { - struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); struct crypto_alg *base = &alg->base; base->cra_type = &crypto_akcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - memset(istat, 0, sizeof(*istat)); } static int akcipher_default_op(struct akcipher_request *req) @@ -157,10 +140,6 @@ int crypto_register_akcipher(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; - if (!alg->sign) - alg->sign = akcipher_default_op; - if (!alg->verify) - alg->verify = akcipher_default_op; if (!alg->encrypt) alg->encrypt = akcipher_default_op; if (!alg->decrypt) @@ -189,7 +168,7 @@ int akcipher_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(akcipher_register_instance); -int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) +static int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) { unsigned int reqsize = crypto_akcipher_reqsize(data->tfm); struct akcipher_request *req; @@ -198,10 +177,7 @@ int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) unsigned int len; u8 *buf; - if (data->dst) - mlen = max(data->slen, data->dlen); - else - mlen = data->slen + data->dlen; + mlen = max(data->slen, data->dlen); len = sizeof(*req) + reqsize + mlen; if (len < mlen) @@ -220,8 +196,7 @@ int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) sg = &data->sg; sg_init_one(sg, buf, mlen); - akcipher_request_set_crypt(req, sg, data->dst ? 
sg : NULL, - data->slen, data->dlen); + akcipher_request_set_crypt(req, sg, sg, data->slen, data->dlen); crypto_init_wait(&data->cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, @@ -229,18 +204,16 @@ int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) return 0; } -EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep); -int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err) +static int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, + int err) { err = crypto_wait_req(err, &data->cwait); - if (data->dst) - memcpy(data->dst, data->buf, data->dlen); + memcpy(data->dst, data->buf, data->dlen); data->dlen = data->req->dst_len; kfree_sensitive(data->req); return err; } -EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post); int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, const void *src, unsigned int slen, @@ -279,34 +252,5 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, } EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt); -static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm) -{ - struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm); - - crypto_free_akcipher(*ctx); -} - -int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm) -{ - struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm); - struct crypto_alg *calg = tfm->__crt_alg; - struct crypto_akcipher *akcipher; - - if (!crypto_mod_get(calg)) - return -EAGAIN; - - akcipher = crypto_create_tfm(calg, &crypto_akcipher_type); - if (IS_ERR(akcipher)) { - crypto_mod_put(calg); - return PTR_ERR(akcipher); - } - - *ctx = akcipher; - tfm->exit = crypto_exit_akcipher_ops_sig; - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig); - MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic public key cipher type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 85bc279b4233..e604d0d8b7b4 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -6,7 +6,6 @@ */ #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fips.h> @@ -23,11 +22,6 @@ static LIST_HEAD(crypto_template_list); -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS -DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test); -EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test); -#endif - static inline void crypto_check_module_sig(struct module *mod) { if (fips_enabled && mod && !module_sig_ok(mod)) @@ -77,12 +71,23 @@ static void crypto_free_instance(struct crypto_instance *inst) static void crypto_destroy_instance_workfn(struct work_struct *w) { - struct crypto_instance *inst = container_of(w, struct crypto_instance, + struct crypto_template *tmpl = container_of(w, struct crypto_template, free_work); - struct crypto_template *tmpl = inst->tmpl; + struct crypto_instance *inst; + struct hlist_node *n; + HLIST_HEAD(list); + + down_write(&crypto_alg_sem); + hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) { + if (refcount_read(&inst->alg.cra_refcnt) != -1) + continue; + hlist_del(&inst->list); + hlist_add_head(&inst->list, &list); + } + up_write(&crypto_alg_sem); - crypto_free_instance(inst); - crypto_tmpl_put(tmpl); + hlist_for_each_entry_safe(inst, n, &list, list) + crypto_free_instance(inst); } static void crypto_destroy_instance(struct crypto_alg *alg) @@ -90,9 +95,10 @@ static void crypto_destroy_instance(struct crypto_alg *alg) struct crypto_instance *inst = container_of(alg, struct crypto_instance, alg); + struct crypto_template *tmpl = inst->tmpl; - INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn); - 
schedule_work(&inst->free_work); + refcount_set(&alg->cra_refcnt, -1); + schedule_work(&tmpl->free_work); } /* @@ -138,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst, inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (!tmpl || !crypto_tmpl_get(tmpl)) + if (!tmpl) return; - list_move(&inst->alg.cra_list, list); + list_del_init(&inst->alg.cra_list); hlist_del(&inst->list); - inst->alg.cra_destroy = crypto_destroy_instance; + hlist_add_head(&inst->list, &tmpl->dead); BUG_ON(!list_empty(&inst->alg.cra_users)); + + crypto_alg_put(&inst->alg); } /* @@ -235,7 +243,6 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, EXPORT_SYMBOL_GPL(crypto_remove_spawns); static void crypto_alg_finish_registration(struct crypto_alg *alg, - bool fulfill_requests, struct list_head *algs_to_put) { struct crypto_alg *q; @@ -247,30 +254,8 @@ static void crypto_alg_finish_registration(struct crypto_alg *alg, if (crypto_is_moribund(q)) continue; - if (crypto_is_larval(q)) { - struct crypto_larval *larval = (void *)q; - - /* - * Check to see if either our generic name or - * specific name can satisfy the name requested - * by the larval entry q. - */ - if (strcmp(alg->cra_name, q->cra_name) && - strcmp(alg->cra_driver_name, q->cra_name)) - continue; - - if (larval->adult) - continue; - if ((q->cra_flags ^ alg->cra_flags) & larval->mask) - continue; - - if (fulfill_requests && crypto_mod_get(alg)) - larval->adult = alg; - else - larval->adult = ERR_PTR(-EAGAIN); - + if (crypto_is_larval(q)) continue; - } if (strcmp(alg->cra_name, q->cra_name)) continue; @@ -289,8 +274,7 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) { struct crypto_larval *larval; - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) || - IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) || + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) || (alg->cra_flags & CRYPTO_ALG_INTERNAL)) return NULL; /* No self-test needed */ @@ -359,7 +343,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) list_add(&larval->alg.cra_list, &crypto_alg_list); } else { alg->cra_flags |= CRYPTO_ALG_TESTED; - crypto_alg_finish_registration(alg, true, algs_to_put); + crypto_alg_finish_registration(alg, algs_to_put); } out: @@ -376,7 +360,6 @@ void crypto_alg_tested(const char *name, int err) struct crypto_alg *alg; struct crypto_alg *q; LIST_HEAD(list); - bool best; down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -390,13 +373,14 @@ void crypto_alg_tested(const char *name, int err) } pr_err("alg: Unexpected test result for %s: %d\n", name, err); - goto unlock; + up_write(&crypto_alg_sem); + return; found: q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (list_empty(&alg->cra_list)) + if (crypto_is_dead(alg)) goto complete; if (err == -ECANCELED) @@ -408,32 +392,15 @@ found: alg->cra_flags |= CRYPTO_ALG_TESTED; - /* - * If a higher-priority implementation of the same algorithm is - * currently being tested, then don't fulfill request larvals. 
- */ - best = true; - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (crypto_is_moribund(q) || !crypto_is_larval(q)) - continue; - - if (strcmp(alg->cra_name, q->cra_name)) - continue; - - if (q->cra_priority > alg->cra_priority) { - best = false; - break; - } - } - - crypto_alg_finish_registration(alg, best, &list); + crypto_alg_finish_registration(alg, &list); complete: + list_del_init(&test->alg.cra_list); complete_all(&test->completion); -unlock: up_write(&crypto_alg_sem); + crypto_alg_put(&test->alg); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_alg_tested); @@ -450,11 +417,20 @@ void crypto_remove_final(struct list_head *list) } EXPORT_SYMBOL_GPL(crypto_remove_final); +static void crypto_free_alg(struct crypto_alg *alg) +{ + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; + + crypto_destroy_alg(alg); + kfree(p); +} + int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; - LIST_HEAD(algs_to_put); bool test_started = false; + LIST_HEAD(algs_to_put); int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -462,6 +438,19 @@ int crypto_register_alg(struct crypto_alg *alg) if (err) return err; + if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST && + !WARN_ON_ONCE(alg->cra_destroy)) { + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; + + p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL); + if (!p) + return -ENOMEM; + + alg = (void *)(p + algsize); + alg->cra_destroy = crypto_free_alg; + } + down_write(&crypto_alg_sem); larval = __crypto_register_alg(alg, &algs_to_put); if (!IS_ERR_OR_NULL(larval)) { @@ -470,11 +459,16 @@ int crypto_register_alg(struct crypto_alg *alg) } up_write(&crypto_alg_sem); - if (IS_ERR(larval)) + if (IS_ERR(larval)) { + crypto_alg_put(alg); return PTR_ERR(larval); + } + if (test_started) - crypto_wait_for_test(larval); - crypto_remove_final(&algs_to_put); + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); + return 0; } EXPORT_SYMBOL_GPL(crypto_register_alg); @@ -504,12 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg) if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name)) return; - if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1)) - return; - - if (alg->cra_destroy) - alg->cra_destroy(alg); + WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1); + list_add(&alg->cra_list, &list); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_unregister_alg); @@ -548,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl) struct crypto_template *q; int err = -EEXIST; + INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn); + down_write(&crypto_alg_sem); crypto_check_module_sig(tmpl->module); @@ -609,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl) crypto_free_instance(inst); } crypto_remove_final(&users); + + flush_work(&tmpl->free_work); } EXPORT_SYMBOL_GPL(crypto_unregister_template); @@ -662,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl, inst->alg.cra_module = tmpl->module; inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE; + inst->alg.cra_destroy = crypto_destroy_instance; down_write(&crypto_alg_sem); @@ -699,9 +695,12 @@ unlock: if (IS_ERR(larval)) return PTR_ERR(larval); + if (larval) - crypto_wait_for_test(larval); - crypto_remove_final(&algs_to_put); + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); + return 0; } EXPORT_SYMBOL_GPL(crypto_register_instance); @@ -924,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr 
*rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg) +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, alg->cra_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return 0; } -EXPORT_SYMBOL_GPL(crypto_inst_setname); +EXPORT_SYMBOL_GPL(__crypto_inst_setname); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) { @@ -995,7 +994,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) queue->backlog = queue->backlog->next; request = queue->list.next; - list_del(request); + list_del_init(request); return list_entry(request, struct crypto_async_request, list); } @@ -1056,9 +1055,14 @@ EXPORT_SYMBOL_GPL(crypto_type_has_alg); static void __init crypto_start_tests(void) { - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI)) + return; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return; + set_crypto_boot_test_finished(); + for (;;) { struct crypto_larval *larval = NULL; struct crypto_alg *q; @@ -1089,10 +1093,8 @@ static void __init crypto_start_tests(void) if (!larval) break; - crypto_wait_for_test(larval); + crypto_schedule_test(larval); } - - set_crypto_boot_test_finished(); } static int __init crypto_algapi_init(void) diff --git a/crypto/algboss.c b/crypto/algboss.c index 0de1e6697949..846f586889ee 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data) { struct cryptomgr_param *param = data; struct crypto_template *tmpl; - int err; + int err = -ENOENT; tmpl = crypto_lookup_template(param->template); if (!tmpl) goto out; @@ -64,6 +64,8 @@ static int cryptomgr_probe(void *data) crypto_tmpl_put(tmpl); out: + param->larval->adult = ERR_PTR(err); + param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD; complete_all(&param->larval->completion); crypto_alg_put(&param->larval->alg); kfree(param); @@ -138,9 +140,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) goto err_free_param; } - if (!i) - goto err_free_param; - param->tb[i + 1] = NULL; param->type.attr.rta_len = sizeof(param->type); @@ -190,7 +189,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) struct task_struct *thread; struct crypto_test_param *param; - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return NOTIFY_DONE; if (!try_module_get(THIS_MODULE)) @@ -248,13 +247,7 @@ static void __exit cryptomgr_exit(void) BUG_ON(err); }
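The probe path above runs when a caller asks for a template instance by name: cryptomgr parses the template expression, looks the template up, and instantiates it around the inner algorithm. A minimal sketch of triggering that from kernel code, assuming the hmac template and a sha256 implementation are available; the function name is illustrative only:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int request_hmac_sha256(void)
    {
        /* A lookup of "hmac(sha256)" makes cryptomgr instantiate the
         * "hmac" template around sha256 if no such instance exists yet. */
        struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);

        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        crypto_free_ahash(tfm);
        return 0;
    }

-/* - * This is arch_initcall() so that the crypto self-tests are run on algorithms - * registered early by subsys_initcall(). subsys_initcall() is needed for - * generic implementations so that they're available for comparison tests when - * other implementations are registered later by module_init(). 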
- */ -arch_initcall(cryptomgr_init); +module_init(cryptomgr_init); module_exit(cryptomgr_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 7d58cbbce4af..79b016a899a1 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -27,7 +27,6 @@ #include <crypto/scatterwalk.h> #include <crypto/if_alg.h> #include <crypto/skcipher.h> -#include <crypto/null.h> #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> @@ -36,19 +35,13 @@ #include <linux/net.h> #include <net/sock.h> -struct aead_tfm { - struct crypto_aead *aead; - struct crypto_sync_skcipher *null_tfm; -}; - static inline bool aead_sufficient_data(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int as = crypto_aead_authsize(tfm); /* @@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int ivsize = crypto_aead_ivsize(tfm); return af_alg_sendmsg(sock, msg, size, ivsize); } -static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm, - struct scatterlist *src, - struct scatterlist *dst, unsigned int len) -{ - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); - - skcipher_request_set_sync_tfm(skreq, null_tfm); - skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, - NULL, NULL); - skcipher_request_set_crypt(skreq, src, dst, len, NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; - struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm; + struct crypto_aead *tfm = pask->private; unsigned int i, as = crypto_aead_authsize(tfm); struct af_alg_async_req *areq; struct af_alg_tsgl *tsgl, *tmp; @@ -223,11 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * v v * RX SGL: AAD || PT || Tag */ - err = crypto_aead_copy_sgl(null_tfm, tsgl_src, - areq->first_rsgl.sgl.sgt.sgl, - processed); - if (err) - goto free; + memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, + processed); af_alg_pull_tsgl(sk, processed, NULL, 0); } else { /* @@ -241,12 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * RX SGL: AAD || CT ----+ */ - /* Copy AAD || CT to RX SGL buffer for in-place operation. */ - err = crypto_aead_copy_sgl(null_tfm, tsgl_src, - areq->first_rsgl.sgl.sgt.sgl, - outlen); - if (err) - goto free; + /* Copy AAD || CT to RX SGL buffer for in-place operation. */ + memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen); /* Create TX SGL for tag and chain it to RX SGL. 
*/ areq->tsgl_entries = af_alg_count_tsgl(sk, processed, @@ -379,7 +348,7 @@ static int aead_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct aead_tfm *tfm; + struct crypto_aead *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -393,7 +362,7 @@ static int aead_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) + if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; atomic_dec(&pask->nokey_refcnt); @@ -454,54 +423,22 @@ static struct proto_ops algif_aead_ops_nokey = { static void *aead_bind(const char *name, u32 type, u32 mask) { - struct aead_tfm *tfm; - struct crypto_aead *aead; - struct crypto_sync_skcipher *null_tfm; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - aead = crypto_alloc_aead(name, type, mask); - if (IS_ERR(aead)) { - kfree(tfm); - return ERR_CAST(aead); - } - - null_tfm = crypto_get_default_null_skcipher(); - if (IS_ERR(null_tfm)) { - crypto_free_aead(aead); - kfree(tfm); - return ERR_CAST(null_tfm); - } - - tfm->aead = aead; - tfm->null_tfm = null_tfm; - - return tfm; + return crypto_alloc_aead(name, type, mask); } static void aead_release(void *private) { - struct aead_tfm *tfm = private; - - crypto_free_aead(tfm->aead); - crypto_put_default_null_skcipher(); - kfree(tfm); + crypto_free_aead(private); } static int aead_setauthsize(void *private, unsigned int authsize) { - struct aead_tfm *tfm = private; - - return crypto_aead_setauthsize(tfm->aead, authsize); + return crypto_aead_setauthsize(private, authsize); } static int aead_setkey(void *private, const u8 *key, unsigned int keylen) { - struct aead_tfm *tfm = private; - - return crypto_aead_setkey(tfm->aead, key, keylen); + return crypto_aead_setkey(private, key, keylen); } static void aead_sock_destruct(struct sock *sk) @@ -510,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk) struct af_alg_ctx *ctx = ask->private; struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int ivlen = crypto_aead_ivsize(tfm); af_alg_pull_tsgl(sk, ctx->used, NULL, 0); @@ -524,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) { struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - struct aead_tfm *tfm = private; - struct crypto_aead *aead = tfm->aead; + struct crypto_aead *tfm = private; unsigned int len = sizeof(*ctx); - unsigned int ivlen = crypto_aead_ivsize(aead); + unsigned int ivlen = crypto_aead_ivsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -554,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) static int aead_accept_parent(void *private, struct sock *sk) { - struct aead_tfm *tfm = private; + struct crypto_aead *tfm = private; - if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) + if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return aead_accept_parent_nokey(private, sk); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index e24c829d7a01..e3f1a4852737 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -223,8 +223,8 @@ unlock: return err ?: len; } -static int hash_accept(struct socket *sock, struct socket *newsock, int flags, - bool kern) +static int hash_accept(struct socket *sock, struct socket *newsock, + struct 
proto_accept_arg *arg) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -252,7 +252,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags, if (err) goto out_free_state; - err = af_alg_accept(ask->parent, newsock, kern); + err = af_alg_accept(ask->parent, newsock, arg); if (err) goto out_free_state; @@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags, goto out_free_state; err = crypto_ahash_import(&ctx2->req, state); - if (err) { - sock_orphan(sk2); - sock_put(sk2); - } out_free_state: kfree_sensitive(state); @@ -355,7 +351,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg, } static int hash_accept_nokey(struct socket *sock, struct socket *newsock, - int flags, bool kern) + struct proto_accept_arg *arg) { int err; @@ -363,7 +359,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock, if (err) return err; - return hash_accept(sock, newsock, flags, kern); + return hash_accept(sock, newsock, arg); } static struct proto_ops algif_hash_ops_nokey = { @@ -471,4 +467,5 @@ static void __exit algif_hash_exit(void) module_init(algif_hash_init); module_exit(algif_hash_exit); +MODULE_DESCRIPTION("Userspace interface for hash algorithms"); MODULE_LICENSE("GPL"); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 02cea2149504..125d395c5e00 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -437,4 +437,5 @@ static void __exit algif_skcipher_exit(void) module_init(algif_skcipher_init); module_exit(algif_skcipher_exit); +MODULE_DESCRIPTION("Userspace interface for skcipher algorithms"); MODULE_LICENSE("GPL"); diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 3f512efaba3a..153523ce6076 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -467,8 +467,8 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator"); MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); module_param(dbg, int, 0); MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); -subsys_initcall(prng_mod_init); +module_init(prng_mod_init); module_exit(prng_mod_fini); MODULE_ALIAS_CRYPTO("stdrng"); MODULE_ALIAS_CRYPTO("ansi_cprng"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/anubis.c b/crypto/anubis.c index 9f0cf61bbc6e..4268c3833baa 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -33,7 +33,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 @@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *key = (const __be32 *)in_key; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; @@ -482,7 +481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) - kappa[i] = be32_to_cpu(key[i]); + kappa[i] = get_unaligned_be32(&in_key[4 * i]); /* * generate R + 1 round keys: @@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], - u8 *ciphertext, const u8 *plaintext, const int R) + u8 *dst, const u8 *src, const int R) { - const __be32 *src = (const __be32 *)plaintext; - __be32 *dst = (__be32 *)ciphertext; int i, r; u32 state[4]; u32 
inter[4]; @@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) - state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; + state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i]; /* * R - 1 full rounds: @@ -654,7 +651,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], */ for (i = 0; i < 4; i++) - dst[i] = cpu_to_be32(inter[i]); + put_unaligned_be32(inter[i], &dst[4 * i]); } static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_ctxsize = sizeof (struct anubis_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = ANUBIS_MIN_KEY_SIZE, @@ -698,7 +694,7 @@ static void __exit anubis_mod_fini(void) crypto_unregister_alg(&anubis_alg); } -subsys_initcall(anubis_mod_init); +module_init(anubis_mod_init); module_exit(anubis_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/api.c b/crypto/api.c index 7f402107f0cc..5724d62e9d07 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -31,12 +31,14 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); -#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); -EXPORT_SYMBOL_GPL(__crypto_boot_test_finished); #endif -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask); +static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, + u32 mask); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { @@ -68,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, if ((q->cra_flags ^ type) & mask) continue; - if (crypto_is_larval(q) && - !crypto_is_test_larval((struct crypto_larval *)q) && - ((struct crypto_larval *)q)->mask != mask) - continue; - exact = !strcmp(q->cra_driver_name, name); fuzzy = !strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) @@ -111,6 +108,8 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) if (!larval) return ERR_PTR(-ENOMEM); + type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK); + larval->mask = mask; larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; larval->alg.cra_priority = -1; @@ -146,38 +145,37 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, if (alg != &larval->alg) { kfree(larval); if (crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); } return alg; } -void crypto_larval_kill(struct crypto_alg *alg) +static void crypto_larval_kill(struct crypto_larval *larval) { - struct crypto_larval *larval = (void *)alg; + bool unlinked; down_write(&crypto_alg_sem); - list_del(&alg->cra_list); + unlinked = list_empty(&larval->alg.cra_list); + if (!unlinked) + list_del_init(&larval->alg.cra_list); up_write(&crypto_alg_sem); + + if (unlinked) + return; + complete_all(&larval->completion); - crypto_alg_put(alg); + crypto_alg_put(&larval->alg); } -EXPORT_SYMBOL_GPL(crypto_larval_kill); -void crypto_wait_for_test(struct crypto_larval *larval) +void crypto_schedule_test(struct crypto_larval *larval) { int err; err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, 
larval->adult); - if (WARN_ON_ONCE(err != NOTIFY_STOP)) - goto out; - - err = wait_for_completion_killable(&larval->completion); - WARN_ON(err); -out: - crypto_larval_kill(&larval->alg); + WARN_ON_ONCE(err != NOTIFY_STOP); } -EXPORT_SYMBOL_GPL(crypto_wait_for_test); +EXPORT_SYMBOL_GPL(crypto_schedule_test); static void crypto_start_test(struct crypto_larval *larval) { @@ -196,28 +194,45 @@ static void crypto_start_test(struct crypto_larval *larval) larval->test_started = true; up_write(&crypto_alg_sem); - crypto_wait_for_test(larval); + crypto_schedule_test(larval); } -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask) { - struct crypto_larval *larval = (void *)alg; - long timeout; + struct crypto_larval *larval; + long time_left; + +again: + larval = container_of(alg, struct crypto_larval, alg); if (!crypto_boot_test_finished()) crypto_start_test(larval); - timeout = wait_for_completion_killable_timeout( + time_left = wait_for_completion_killable_timeout( &larval->completion, 60 * HZ); alg = larval->adult; - if (timeout < 0) + if (time_left < 0) alg = ERR_PTR(-EINTR); - else if (!timeout) + else if (!time_left) { + if (crypto_is_test_larval(larval)) + crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); - else if (!alg) - alg = ERR_PTR(-ENOENT); - else if (IS_ERR(alg)) + } else if (!alg || PTR_ERR(alg) == -EEXIST) { + int err = alg ? -EEXIST : -EAGAIN; + + /* + * EEXIST is expected because two probes can be scheduled + * at the same time with one using alg_name and the other + * using driver_name. Do a re-lookup but do not retry in + * case we hit a quirk like gcm_base(ctr(aes),...) which + * will never match. + */ + alg = &larval->alg; + alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: + ERR_PTR(err); + } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) @@ -228,6 +243,9 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); + if (!IS_ERR(alg) && crypto_is_larval(alg)) + goto again; + return alg; } @@ -291,9 +309,13 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, } if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); - else if (!alg) + alg = crypto_larval_wait(alg, type, mask); + else if (alg) + ; + else if (!(mask & CRYPTO_ALG_TESTED)) alg = crypto_larval_add(name, type, mask); + else + alg = ERR_PTR(-ENOENT); return alg; } @@ -335,12 +357,12 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); if (ok == NOTIFY_STOP) - alg = crypto_larval_wait(larval); + alg = crypto_larval_wait(larval, type, mask); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); } - crypto_larval_kill(larval); + crypto_larval_kill(container_of(larval, struct crypto_larval, alg)); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); @@ -369,10 +391,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) case CRYPTO_ALG_TYPE_CIPHER: len += crypto_cipher_ctxsize(alg); break; - - case CRYPTO_ALG_TYPE_COMPRESS: - len += crypto_compress_ctxsize(alg); - break; } return len; @@ -518,6 +536,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg, goto out; tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); + tfm->fb = tfm; err = frontend->init_tfm(tfm); if (err) @@ -559,7 +578,7 @@ void 
*crypto_clone_tfm(const struct crypto_type *frontend, tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); tfm->crt_flags = otfm->crt_flags; - tfm->exit = otfm->exit; + tfm->fb = tfm; out: return mem; @@ -693,5 +712,31 @@ void crypto_req_done(void *data, int err) } EXPORT_SYMBOL_GPL(crypto_req_done); +void crypto_destroy_alg(struct crypto_alg *alg) +{ + if (alg->cra_type && alg->cra_type->destroy) + alg->cra_type->destroy(alg); + if (alg->cra_destroy) + alg->cra_destroy(alg); +} +EXPORT_SYMBOL_GPL(crypto_destroy_alg); + +struct crypto_async_request *crypto_request_clone( + struct crypto_async_request *req, size_t total, gfp_t gfp) +{ + struct crypto_tfm *tfm = req->tfm; + struct crypto_async_request *nreq; + + nreq = kmemdup(req, total, gfp); + if (!nreq) { + req->tfm = tfm->fb; + return req; + } + + nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK; + return nreq; +} +EXPORT_SYMBOL_GPL(crypto_request_clone); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/crypto/arc4.c b/crypto/arc4.c index 1a4825c97c5a..1608018111d0 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -73,7 +73,7 @@ static void __exit arc4_exit(void) crypto_unregister_lskcipher(&arc4_alg); } -subsys_initcall(arc4_init); +module_init(arc4_init); module_exit(arc4_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c index d96dfc4fdde6..faa7900383f6 100644 --- a/crypto/aria_generic.c +++ b/crypto/aria_generic.c @@ -15,6 +15,7 @@ */ #include <crypto/aria.h> +#include <linux/unaligned.h> static const u32 key_rc[20] = { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, @@ -27,7 +28,6 @@ static const u32 key_rc[20] = { static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, unsigned int key_len) { - const __be32 *key = (const __be32 *)in_key; u32 w0[4], w1[4], w2[4], w3[4]; u32 reg0, reg1, reg2, reg3; const u32 *ck; @@ -35,10 +35,10 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, ck = &key_rc[(key_len - 16) / 2]; - w0[0] = be32_to_cpu(key[0]); - w0[1] = be32_to_cpu(key[1]); - w0[2] = be32_to_cpu(key[2]); - w0[3] = be32_to_cpu(key[3]); + w0[0] = get_unaligned_be32(&in_key[0]); + w0[1] = get_unaligned_be32(&in_key[4]); + w0[2] = get_unaligned_be32(&in_key[8]); + w0[3] = get_unaligned_be32(&in_key[12]); reg0 = w0[0] ^ ck[0]; reg1 = w0[1] ^ ck[1]; @@ -48,11 +48,11 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); if (key_len > 16) { - w1[0] = be32_to_cpu(key[4]); - w1[1] = be32_to_cpu(key[5]); + w1[0] = get_unaligned_be32(&in_key[16]); + w1[1] = get_unaligned_be32(&in_key[20]); if (key_len > 24) { - w1[2] = be32_to_cpu(key[6]); - w1[3] = be32_to_cpu(key[7]); + w1[2] = get_unaligned_be32(&in_key[24]); + w1[3] = get_unaligned_be32(&in_key[28]); } else { w1[2] = 0; w1[3] = 0; @@ -195,17 +195,15 @@ EXPORT_SYMBOL_GPL(aria_set_key); static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, u32 key[][ARIA_RD_KEY_WORDS]) { - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 reg0, reg1, reg2, reg3; int rounds, rkidx = 0; rounds = ctx->rounds; - reg0 = be32_to_cpu(src[0]); - reg1 = be32_to_cpu(src[1]); - reg2 = be32_to_cpu(src[2]); - reg3 = be32_to_cpu(src[3]); + reg0 = get_unaligned_be32(&in[0]); + reg1 = get_unaligned_be32(&in[4]); + reg2 = get_unaligned_be32(&in[8]); + reg3 = get_unaligned_be32(&in[12]); aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); rkidx++;
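The conversions above drop the __be32 pointer casts, which implied four-byte alignment and forced cra_alignmask = 3, in favor of the <linux/unaligned.h> accessors that work at any byte offset. A minimal sketch of the resulting load/store pattern, with illustrative helper names:

    #include <linux/types.h>
    #include <linux/unaligned.h>

    /* Load the i-th big-endian 32-bit word from a buffer of any alignment. */
    static inline u32 load_be32(const u8 *p, int i)
    {
        return get_unaligned_be32(&p[4 * i]);
    }

    /* Store it back as big-endian bytes, again with no alignment
     * requirement, which is why the cipher no longer needs a cra_alignmask. */
    static inline void store_be32(u8 *p, int i, u32 v)
    {
        put_unaligned_be32(v, &p[4 * i]);
    }

@@ -241,10 +239,10 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, 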
const u8 *in, (u8)(s1[get_u8(reg3, 2)]), (u8)(s2[get_u8(reg3, 3)])); - dst[0] = cpu_to_be32(reg0); - dst[1] = cpu_to_be32(reg1); - dst[2] = cpu_to_be32(reg2); - dst[3] = cpu_to_be32(reg3); + put_unaligned_be32(reg0, &out[0]); + put_unaligned_be32(reg1, &out[4]); + put_unaligned_be32(reg2, &out[8]); + put_unaligned_be32(reg3, &out[12]); } void aria_encrypt(void *_ctx, u8 *out, const u8 *in) @@ -284,7 +282,6 @@ static struct crypto_alg aria_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ARIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aria_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { @@ -307,7 +304,7 @@ static void __exit aria_fini(void) crypto_unregister_alg(&aria_alg); } -subsys_initcall(aria_init); +module_init(aria_init); module_exit(aria_fini); MODULE_DESCRIPTION("ARIA Cipher Algorithm"); diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 59ec726b7c77..e1345b8f39f1 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE select MPILIB select CRYPTO_HASH_INFO select CRYPTO_AKCIPHER + select CRYPTO_SIG select CRYPTO_HASH help This option provides support for asymmetric public key type handling. @@ -85,5 +86,21 @@ config FIPS_SIGNATURE_SELFTEST depends on ASYMMETRIC_KEY_TYPE depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER depends on X509_CERTIFICATE_PARSER + depends on CRYPTO_RSA + depends on CRYPTO_SHA256 + +config FIPS_SIGNATURE_SELFTEST_RSA + bool + default y + depends on FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_RSA=y || CRYPTO_RSA=FIPS_SIGNATURE_SELFTEST + +config FIPS_SIGNATURE_SELFTEST_ECDSA + bool + default y + depends on FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST + depends on CRYPTO_ECDSA=y || CRYPTO_ECDSA=FIPS_SIGNATURE_SELFTEST endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index 1a273d6df3eb..bc65d3b98dcb 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -24,6 +24,8 @@ x509_key_parser-y := \ x509_public_key.o obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o x509_selftest-y += selftest.o +x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_RSA) += selftest_rsa.o +x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA) += selftest_ecdsa.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index a5da8ccd353e..ba2d9d1ea235 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -18,16 +18,6 @@ #include "asymmetric_keys.h" -const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = { - [VERIFYING_MODULE_SIGNATURE] = "mod sig", - [VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig", - [VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig", - [VERIFYING_KEY_SIGNATURE] = "key sig", - [VERIFYING_KEY_SELF_SIGNATURE] = "key self sig", - [VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig", -}; -EXPORT_SYMBOL_GPL(key_being_used_for); - static LIST_HEAD(asymmetric_key_parsers); static DECLARE_RWSEM(asymmetric_key_parsers_sem); @@ -60,17 +50,18 @@ struct key *find_asymmetric_key(struct key *keyring, char *req, *p; int len; - WARN_ON(!id_0 && !id_1 && !id_2); - if (id_0) { lookup = id_0->data; len = id_0->len; } else if (id_1) { lookup = id_1->data; len = id_1->len; - } else { + } else 
if (id_2) { lookup = id_2->data; len = id_2->len; + } else { + WARN_ON(1); + return ERR_PTR(-EINVAL); } /* Construct an identifier "id:<keyid>". */ diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 05402ef8964e..8aecbe4637f3 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,6 +75,9 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { + case OID_sha1: + ctx->digest_algo = "sha1"; + break; case OID_sha256: ctx->digest_algo = "sha256"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 5b08c50722d0..423d13c47545 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,6 +227,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { + case OID_sha1: + ctx->sinfo->sig->hash_algo = "sha1"; + break; case OID_sha256: ctx->sinfo->sig->hash_algo = "sha256"; break; @@ -278,6 +281,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; + case OID_id_ecdsa_with_sha1: case OID_id_ecdsa_with_sha224: case OID_id_ecdsa_with_sha256: case OID_id_ecdsa_with_sha384: @@ -288,10 +292,6 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "ecdsa"; ctx->sinfo->sig->encoding = "x962"; break; - case OID_SM2_with_SM3: - ctx->sinfo->sig->pkey_algo = "sm2"; - ctx->sinfo->sig->encoding = "raw"; - break; case OID_gost2012PKey256: case OID_gost2012PKey512: ctx->sinfo->sig->pkey_algo = "ecrdsa"; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index e5f22691febd..e5b177c8e842 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -83,13 +83,19 @@ software_key_determine_akcipher(const struct public_key *pkey, if (strcmp(encoding, "pkcs1") == 0) { *sig = op == kernel_pkey_sign || op == kernel_pkey_verify; - if (!hash_algo) { + if (!*sig) { + /* + * For encrypt/decrypt, hash_algo is not used + * but allowed to be set for historic reasons. + */ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", pkey->pkey_algo); } else { + if (!hash_algo) + hash_algo = "none"; n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", + "pkcs1(%s,%s)", pkey->pkey_algo, hash_algo); } return n >= CRYPTO_MAX_ALG_NAME ? 
-EINVAL : 0; @@ -104,7 +110,8 @@ software_key_determine_akcipher(const struct public_key *pkey, return -EINVAL; *sig = false; } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { - if (strcmp(encoding, "x962") != 0) + if (strcmp(encoding, "x962") != 0 && + strcmp(encoding, "p1363") != 0) return -EINVAL; /* * ECDSA signatures are taken over a raw hash, so they don't @@ -115,7 +122,8 @@ software_key_determine_akcipher(const struct public_key *pkey, */ if (!hash_algo) return -EINVAL; - if (strcmp(hash_algo, "sha224") != 0 && + if (strcmp(hash_algo, "sha1") != 0 && + strcmp(hash_algo, "sha224") != 0 && strcmp(hash_algo, "sha256") != 0 && strcmp(hash_algo, "sha384") != 0 && strcmp(hash_algo, "sha512") != 0 && @@ -123,13 +131,9 @@ software_key_determine_akcipher(const struct public_key *pkey, strcmp(hash_algo, "sha3-384") != 0 && strcmp(hash_algo, "sha3-512") != 0) return -EINVAL; - } else if (strcmp(pkey->pkey_algo, "sm2") == 0) { - if (strcmp(encoding, "raw") != 0) - return -EINVAL; - if (!hash_algo) - return -EINVAL; - if (strcmp(hash_algo, "sm3") != 0) - return -EINVAL; + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", + encoding, pkey->pkey_algo); + return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; } else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) { if (strcmp(encoding, "raw") != 0) return -EINVAL; @@ -159,10 +163,8 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val) static int software_key_query(const struct kernel_pkey_params *params, struct kernel_pkey_query *info) { - struct crypto_akcipher *tfm; struct public_key *pkey = params->key->payload.data[asym_crypto]; char alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_sig *sig; u8 *key, *ptr; int ret, len; bool issig; @@ -184,7 +186,11 @@ static int software_key_query(const struct kernel_pkey_params *params, ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); + memset(info, 0, sizeof(*info)); + if (issig) { + struct crypto_sig *sig; + sig = crypto_alloc_sig(alg_name, 0, 0); if (IS_ERR(sig)) { ret = PTR_ERR(sig); @@ -196,20 +202,31 @@ static int software_key_query(const struct kernel_pkey_params *params, else ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); if (ret < 0) - goto error_free_tfm; + goto error_free_sig; - len = crypto_sig_maxsize(sig); + len = crypto_sig_keysize(sig); + info->key_size = len; + info->max_sig_size = crypto_sig_maxsize(sig); + info->max_data_size = crypto_sig_digestsize(sig); info->supported_ops = KEYCTL_SUPPORTS_VERIFY; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_SIGN; if (strcmp(params->encoding, "pkcs1") == 0) { + info->max_enc_size = len / BITS_PER_BYTE; + info->max_dec_size = len / BITS_PER_BYTE; + info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; } + +error_free_sig: + crypto_free_sig(sig); } else { + struct crypto_akcipher *tfm; + tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); @@ -221,48 +238,23 @@ static int software_key_query(const struct kernel_pkey_params *params, else ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); if (ret < 0) - goto error_free_tfm; + goto error_free_akcipher; len = crypto_akcipher_maxsize(tfm); + info->key_size = len * BITS_PER_BYTE; + info->max_sig_size = len; + info->max_data_size = len; + info->max_enc_size = len; + info->max_dec_size = len; info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; - } - - info->key_size = len * 8; - if 
(strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { - /* - * ECDSA key sizes are much smaller than RSA, and thus could - * operate on (hashed) inputs that are larger than key size. - * For example SHA384-hashed input used with secp256r1 - * based keys. Set max_data_size to be at least as large as - * the largest supported hash size (SHA512) - */ - info->max_data_size = 64; - - /* - * Verify takes ECDSA-Sig (described in RFC 5480) as input, - * which is actually 2 'key_size'-bit integers encoded in - * ASN.1. Account for the ASN.1 encoding overhead here. - */ - info->max_sig_size = 2 * (len + 3) + 2; - } else { - info->max_data_size = len; - info->max_sig_size = len; +error_free_akcipher: + crypto_free_akcipher(tfm); } - info->max_enc_size = len; - info->max_dec_size = len; - - ret = 0; - -error_free_tfm: - if (issig) - crypto_free_sig(sig); - else - crypto_free_akcipher(tfm); error_free_key: kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); @@ -281,7 +273,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params, struct crypto_sig *sig; char *key, *ptr; bool issig; - int ksz; int ret; pr_devel("==>%s()\n", __func__); @@ -316,8 +307,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params, ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); if (ret) goto error_free_tfm; - - ksz = crypto_sig_maxsize(sig); } else { tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) { @@ -331,8 +320,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params, ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); if (ret) goto error_free_tfm; - - ksz = crypto_akcipher_maxsize(tfm); } ret = -EINVAL; @@ -361,8 +348,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params, BUG(); } - if (ret == 0) - ret = ksz; + if (!issig && ret == 0) + ret = crypto_akcipher_maxsize(tfm); error_free_tfm: if (issig) diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c index c50da7ef90ae..98dc5cdfdebe 100644 --- a/crypto/asymmetric_keys/selftest.c +++ b/crypto/asymmetric_keys/selftest.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Self-testing for signature checking. * * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved. @@ -9,179 +10,18 @@ #include <linux/kernel.h> #include <linux/key.h> #include <linux/module.h> +#include "selftest.h" #include "x509_parser.h" -struct certs_test { - const u8 *data; - size_t data_len; - const u8 *pkcs7; - size_t pkcs7_len; -}; - -/* - * Set of X.509 certificates to provide public keys for the tests. These will - * be loaded into a temporary keyring for the duration of the testing. 
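
An aside on the software_key_query() hunk above: the removed 2 * (len + 3) + 2 bound falls straight out of DER. A minimal sketch of the arithmetic, assuming short-form lengths throughout (the helper name is illustrative, not kernel API):

/*
 * ECDSA-Sig-Value (RFC 5480) is SEQUENCE { r INTEGER, s INTEGER }.
 * Each INTEGER costs 1 tag byte + 1 length byte + at most key_bytes + 1
 * value bytes (a 0x00 pad keeps the big-endian value positive), i.e.
 * key_bytes + 3.  The enclosing SEQUENCE adds a 2-byte header.
 */
static unsigned int ecdsa_max_sig_size(unsigned int key_bytes)
{
	return 2 * (key_bytes + 3) + 2;
}

After the refactor the same figure is reported by crypto_sig_maxsize(), so software_key_query() no longer needs to open-code it.
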
- */ -static const __initconst u8 certs_selftest_keys[] = { - "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73" - "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a" - "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b" - "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43" - "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66" - "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73" - "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35" - "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30" - "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30" - "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61" - "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20" - "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79" - "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01" - "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01" - "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f" - "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99" - "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92" - "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77" - "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55" - "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d" - "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44" - "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01" - "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7" - "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68" - "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6" - "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81" - "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78" - "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6" - "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f" - "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62" - "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94" - "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48" - "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c" - "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45" - "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a" - "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68" - "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09" - "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95" - "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec" - "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe" - "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42" - "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35" - "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4" - "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f" - "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a" - "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc" - "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d" - "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04" - "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14" - "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17" - 
"\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80" - "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88" - "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01" - "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85" - "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47" - "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88" - "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12" - "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f" - "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71" - "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b" - "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56" - "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f" - "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02" - "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52" - "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a" - "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99" - "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa" - "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87" - "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13" - "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f" - "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66" - "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07" - "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05" - "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04" - "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c" - "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb" - "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48" - "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d" - "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4" - "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd" - "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71" - "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70" - "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46" - "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0" - "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62" - "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3" -}; - -/* - * Signed data and detached signature blobs that form the verification tests. 
- */ -static const __initconst u8 certs_selftest_1_data[] = { - "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" - "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" - "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" - "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" - "\x61\x74\x69\x6f\x6e\x2e\x0a" -}; - -static const __initconst u8 certs_selftest_1_pkcs7[] = { - "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0" - "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09" - "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48" - "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01" - "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29" - "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69" - "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65" - "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d" - "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30" - "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09" - "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac" - "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3" - "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c" - "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00" - "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb" - "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11" - "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33" - "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23" - "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37" - "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab" - "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c" - "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41" - "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6" - "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72" - "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87" - "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4" - "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92" - "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79" - "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b" - "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0" - "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f" - "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e" - "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31" - "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24" - "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14" - "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe" - "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8" - "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e" - "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36" - "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb" - "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64" - "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51" - "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2" -}; - -/* - * List of tests to be run. 
- */ -#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 } -static const struct certs_test certs_tests[] __initconst = { - TEST(certs_selftest_1_data, certs_selftest_1_pkcs7), -}; - -static int __init fips_signature_selftest(void) +void fips_signature_selftest(const char *name, + const u8 *keys, size_t keys_len, + const u8 *data, size_t data_len, + const u8 *sig, size_t sig_len) { struct key *keyring; - int ret, i; + int ret; - pr_notice("Running certificate verification selftests\n"); + pr_notice("Running certificate verification %s selftest\n", name); keyring = keyring_alloc(".certs_selftest", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), @@ -191,40 +31,41 @@ static int __init fips_signature_selftest(void) KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) - panic("Can't allocate certs selftest keyring: %ld\n", - PTR_ERR(keyring)); + panic("Can't allocate certs %s selftest keyring: %ld\n", name, PTR_ERR(keyring)); - ret = x509_load_certificate_list(certs_selftest_keys, - sizeof(certs_selftest_keys) - 1, keyring); + ret = x509_load_certificate_list(keys, keys_len, keyring); if (ret < 0) - panic("Can't allocate certs selftest keyring: %d\n", ret); + panic("Can't allocate certs %s selftest keyring: %d\n", name, ret); - for (i = 0; i < ARRAY_SIZE(certs_tests); i++) { - const struct certs_test *test = &certs_tests[i]; - struct pkcs7_message *pkcs7; + struct pkcs7_message *pkcs7; - pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len); - if (IS_ERR(pkcs7)) - panic("Certs selftest %d: pkcs7_parse_message() = %d\n", i, ret); + pkcs7 = pkcs7_parse_message(sig, sig_len); + if (IS_ERR(pkcs7)) + panic("Certs %s selftest: pkcs7_parse_message() = %d\n", name, ret); - pkcs7_supply_detached_data(pkcs7, test->data, test->data_len); + pkcs7_supply_detached_data(pkcs7, data, data_len); - ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE); - if (ret < 0) - panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret); + ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE); + if (ret < 0) + panic("Certs %s selftest: pkcs7_verify() = %d\n", name, ret); - ret = pkcs7_validate_trust(pkcs7, keyring); - if (ret < 0) - panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret); + ret = pkcs7_validate_trust(pkcs7, keyring); + if (ret < 0) + panic("Certs %s selftest: pkcs7_validate_trust() = %d\n", name, ret); - pkcs7_free_message(pkcs7); - } + pkcs7_free_message(pkcs7); key_put(keyring); +} + +static int __init fips_signature_selftest_init(void) +{ + fips_signature_selftest_rsa(); + fips_signature_selftest_ecdsa(); return 0; } -late_initcall(fips_signature_selftest); +late_initcall(fips_signature_selftest_init); MODULE_DESCRIPTION("X.509 self tests"); MODULE_AUTHOR("Red Hat, Inc."); diff --git a/crypto/asymmetric_keys/selftest.h b/crypto/asymmetric_keys/selftest.h new file mode 100644 index 000000000000..4139f05906cb --- /dev/null +++ b/crypto/asymmetric_keys/selftest.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Helper function for self-testing PKCS#7 signature verification. 
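
Stripped of the old per-test loop, the rewritten helper above runs one fixed pipeline per algorithm. A condensed sketch (keyring setup and error handling elided; the calls are exactly those in the function body above):

struct pkcs7_message *pkcs7;

pkcs7 = pkcs7_parse_message(sig, sig_len);		/* parse the PKCS#7 blob */
pkcs7_supply_detached_data(pkcs7, data, data_len);	/* the payload is detached */
pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);	/* check the signature */
pkcs7_validate_trust(pkcs7, keyring);			/* chain to the test keyring */
pkcs7_free_message(pkcs7);
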
+ * + * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com> + */ + +void fips_signature_selftest(const char *name, + const u8 *keys, size_t keys_len, + const u8 *data, size_t data_len, + const u8 *sig, size_t sig_len); + +#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_RSA +void __init fips_signature_selftest_rsa(void); +#else +static inline void __init fips_signature_selftest_rsa(void) { } +#endif + +#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA +void __init fips_signature_selftest_ecdsa(void); +#else +static inline void __init fips_signature_selftest_ecdsa(void) { } +#endif diff --git a/crypto/asymmetric_keys/selftest_ecdsa.c b/crypto/asymmetric_keys/selftest_ecdsa.c new file mode 100644 index 000000000000..20a0868b30de --- /dev/null +++ b/crypto/asymmetric_keys/selftest_ecdsa.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Self-tests for PKCS#7 ECDSA signature verification. + * + * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com> + */ + +#include <linux/module.h> +#include "selftest.h" + +/* + * Set of X.509 certificates to provide public keys for the tests. These will + * be loaded into a temporary keyring for the duration of the testing. + */ +static const u8 certs_selftest_ecdsa_keys[] __initconst = { + /* P-256 ECDSA certificate */ + "\x30\x82\x01\xd4\x30\x82\x01\x7b\xa0\x03\x02\x01\x02\x02\x14\x2e" + "\xea\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0" + "\xf8\x6f\xde\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x30" + "\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74" + "\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61" + "\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d" + "\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32" + "\x34\x30\x34\x31\x33\x32\x32\x31\x36\x32\x36\x5a\x18\x0f\x32\x31" + "\x32\x34\x30\x33\x32\x30\x32\x32\x31\x36\x32\x36\x5a\x30\x3a\x31" + "\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74\x69\x66" + "\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69" + "\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d\x74\x65" + "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x59\x30\x13\x06\x07\x2a" + "\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07" + "\x03\x42\x00\x04\x07\xe5\x6b\x51\xaf\xfc\x19\x41\x2c\x88\x92\x6b" + "\x77\x57\x71\x03\x9e\xe2\xfe\x6e\x6a\x71\x4e\xc7\x29\x9f\x90\xe1" + "\x77\x18\x9f\xc2\xe7\x0a\x82\xd0\x8a\xe1\x81\xa9\x71\x7c\x5a\x73" + "\xfb\x25\xb9\x5b\x1e\x24\x8c\x73\x9f\xf8\x38\xf8\x48\xb4\xad\x16" + "\x19\xc0\x22\xc6\xa3\x5d\x30\x5b\x30\x1d\x06\x03\x55\x1d\x0e\x04" + "\x16\x04\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2\x3d" + "\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x1f\x06\x03\x55\x1d\x23\x04\x18" + "\x30\x16\x80\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2" + "\x3d\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x0c\x06\x03\x55\x1d\x13\x01" + "\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04\x04\x03" + "\x02\x07\x80\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x03" + "\x47\x00\x30\x44\x02\x20\x1a\xd7\xac\x07\xc8\x97\x38\xf4\x89\x43" + "\x7e\xc7\x66\x6e\xa5\x00\x7c\x12\x1d\xb4\x09\x76\x0c\x99\x6b\x8c" + "\x26\x5d\xe9\x70\x5c\xb4\x02\x20\x73\xb7\xc7\x7a\x5a\xdb\x67\x0a" + "\x96\x42\x19\xcf\x4f\x67\x4f\x35\x6a\xee\x29\x25\xf2\x4f\xc8\x10" + "\x14\x9d\x79\x69\x1c\x7a\xd7\x5d" +}; + +/* + * Signed data and detached signature blobs that form the verification tests. 
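
The header above uses the usual Kconfig-stub pattern: when a selftest option is disabled, its entry point collapses to an empty static inline, so fips_signature_selftest_init() can call both unconditionally and the compiler discards the dead call. The same shape, with a hypothetical FOO algorithm for illustration:

#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_FOO
void __init fips_signature_selftest_foo(void);
#else
static inline void __init fips_signature_selftest_foo(void) { }
#endif
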
+ */ +static const u8 certs_selftest_ecdsa_data[] __initconst = { + "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" + "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x2e\x0a" +}; + +static const u8 certs_selftest_ecdsa_sig[] __initconst = { + /* ECDSA signature using SHA-256 */ + "\x30\x81\xf4\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0\x81" + "\xe6\x30\x81\xe3\x02\x01\x01\x31\x0f\x30\x0d\x06\x09\x60\x86\x48" + "\x01\x65\x03\x04\x02\x01\x05\x00\x30\x0b\x06\x09\x2a\x86\x48\x86" + "\xf7\x0d\x01\x07\x01\x31\x81\xbf\x30\x81\xbc\x02\x01\x01\x30\x52" + "\x30\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66" + "\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x2e\xea" + "\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0\xf8" + "\x6f\xde\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05" + "\x00\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x04\x48\x30" + "\x46\x02\x21\x00\x86\xd1\xf4\x06\xb6\x49\x79\xf9\x09\x5f\x35\x1a" + "\x94\x7e\x0e\x1a\x12\x4d\xd9\xe6\x2a\x2d\xcf\x2d\x0a\xee\x88\x76" + "\xe0\x35\xf3\xeb\x02\x21\x00\xdf\x11\x8a\xab\x31\xf6\x3c\x1f\x32" + "\x43\x94\xe2\xb8\x35\xc9\xf3\x12\x4e\x9b\x31\x08\x10\x5d\x8d\xe2" + "\x43\x0a\x5f\xf5\xfd\xa2\xf1" +}; + +void __init fips_signature_selftest_ecdsa(void) +{ + fips_signature_selftest("ECDSA", + certs_selftest_ecdsa_keys, + sizeof(certs_selftest_ecdsa_keys) - 1, + certs_selftest_ecdsa_data, + sizeof(certs_selftest_ecdsa_data) - 1, + certs_selftest_ecdsa_sig, + sizeof(certs_selftest_ecdsa_sig) - 1); +} diff --git a/crypto/asymmetric_keys/selftest_rsa.c b/crypto/asymmetric_keys/selftest_rsa.c new file mode 100644 index 000000000000..09c9815e456a --- /dev/null +++ b/crypto/asymmetric_keys/selftest_rsa.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Self-tests for PKCS#7 RSA signature verification. + * + * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com> + */ + +#include <linux/module.h> +#include "selftest.h" + +/* + * Set of X.509 certificates to provide public keys for the tests. These will + * be loaded into a temporary keyring for the duration of the testing. 
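
Incidentally, the ECDSA signature blob in selftest_ecdsa.c above hits the worst case: the OCTET STRING carrying the signature reads \x04\x48, i.e. 72 bytes, because both r and s need a 0x00 pad byte (\x02\x21\x00...). A compile-time check of the arithmetic, matching the 2 * (32 + 3) + 2 = 72 bound discussed earlier:

/* SEQUENCE header + 2 * (INTEGER tag + length + 0x00 pad + 32 value bytes) */
_Static_assert(2 + 2 * (1 + 1 + 1 + 32) == 72,
	       "worst-case P-256 ECDSA-Sig-Value size");
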
+ */ +static const u8 certs_selftest_rsa_keys[] __initconst = { + /* 4096-bit RSA certificate */ + "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73" + "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a" + "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b" + "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43" + "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66" + "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73" + "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35" + "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30" + "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30" + "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61" + "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79" + "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01" + "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01" + "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f" + "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99" + "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92" + "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77" + "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55" + "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d" + "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44" + "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01" + "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7" + "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68" + "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6" + "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81" + "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78" + "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6" + "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f" + "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62" + "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94" + "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48" + "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c" + "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45" + "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a" + "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68" + "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09" + "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95" + "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec" + "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe" + "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42" + "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35" + "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4" + "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f" + "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a" + "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc" + "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d" + "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04" + "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14" + "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17" + 
"\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80" + "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88" + "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01" + "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85" + "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47" + "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88" + "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12" + "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f" + "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71" + "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b" + "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56" + "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f" + "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02" + "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52" + "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a" + "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99" + "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa" + "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87" + "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13" + "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f" + "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66" + "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07" + "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05" + "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04" + "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c" + "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb" + "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48" + "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d" + "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4" + "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd" + "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71" + "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70" + "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46" + "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0" + "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62" + "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3" +}; + +/* + * Signed data and detached signature blobs that form the verification tests. 
+ */ +static const u8 certs_selftest_rsa_data[] __initconst = { + "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73" + "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20" + "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72" + "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63" + "\x61\x74\x69\x6f\x6e\x2e\x0a" +}; + +static const u8 certs_selftest_rsa_sig[] __initconst = { + /* RSA signature using PKCS#1 v1.5 padding with SHA-256 */ + "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0" + "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09" + "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48" + "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01" + "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29" + "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69" + "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65" + "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d" + "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30" + "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09" + "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac" + "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3" + "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c" + "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00" + "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb" + "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11" + "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33" + "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23" + "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37" + "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab" + "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c" + "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41" + "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6" + "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72" + "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87" + "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4" + "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92" + "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79" + "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b" + "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0" + "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f" + "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e" + "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31" + "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24" + "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14" + "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe" + "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8" + "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e" + "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36" + "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb" + "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64" + "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51" + "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2" +}; + +void __init fips_signature_selftest_rsa(void) +{ + fips_signature_selftest("RSA", + 
certs_selftest_rsa_keys, + sizeof(certs_selftest_rsa_keys) - 1, + certs_selftest_rsa_data, + sizeof(certs_selftest_rsa_data) - 1, + certs_selftest_rsa_sig, + sizeof(certs_selftest_rsa_sig) - 1); +} diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 398983be77e8..041d04b5c953 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -65,69 +65,6 @@ int query_asymmetric_key(const struct kernel_pkey_params *params, EXPORT_SYMBOL_GPL(query_asymmetric_key); /** - * encrypt_blob - Encrypt data using an asymmetric key - * @params: Various parameters - * @data: Data blob to be encrypted, length params->data_len - * @enc: Encrypted data buffer, length params->enc_len - * - * Encrypt the specified data blob using the private key specified by - * params->key. The encrypted data is wrapped in an encoding if - * params->encoding is specified (eg. "pkcs1"). - * - * Returns the length of the data placed in the encrypted data buffer or an - * error. - */ -int encrypt_blob(struct kernel_pkey_params *params, - const void *data, void *enc) -{ - params->op = kernel_pkey_encrypt; - return asymmetric_key_eds_op(params, data, enc); -} -EXPORT_SYMBOL_GPL(encrypt_blob); - -/** - * decrypt_blob - Decrypt data using an asymmetric key - * @params: Various parameters - * @enc: Encrypted data to be decrypted, length params->enc_len - * @data: Decrypted data buffer, length params->data_len - * - * Decrypt the specified data blob using the private key specified by - * params->key. The decrypted data is wrapped in an encoding if - * params->encoding is specified (eg. "pkcs1"). - * - * Returns the length of the data placed in the decrypted data buffer or an - * error. - */ -int decrypt_blob(struct kernel_pkey_params *params, - const void *enc, void *data) -{ - params->op = kernel_pkey_decrypt; - return asymmetric_key_eds_op(params, enc, data); -} -EXPORT_SYMBOL_GPL(decrypt_blob); - -/** - * create_signature - Sign some data using an asymmetric key - * @params: Various parameters - * @data: Data blob to be signed, length params->data_len - * @enc: Signature buffer, length params->enc_len - * - * Sign the specified data blob using the private key specified by params->key. - * The signature is wrapped in an encoding if params->encoding is specified - * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be - * passed through params->hash_algo (eg. "sha512"). - * - * Returns the length of the data placed in the signature buffer or an error. 
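
Looking ahead to the x509_cert_parser.c and x509_public_key.c hunks below, which replace manual goto-unwind error paths with the linux/cleanup.h idiom: DEFINE_FREE() binds a destructor to a name, __free(name) runs that destructor when the variable goes out of scope, and return_ptr() hands ownership out without freeing. A minimal sketch of the pattern; struct foo and foo_decode() are hypothetical stand-ins:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };
static int foo_decode(struct foo *f, const void *data);	/* hypothetical */

DEFINE_FREE(foo_free, struct foo *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

static struct foo *foo_parse(const void *data)
{
	struct foo *f __free(foo_free) = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);
	if (foo_decode(f, data))
		return ERR_PTR(-EBADMSG);	/* f is freed on this return */

	return_ptr(f);				/* ownership transferred: no free */
}

Every early return runs the destructor automatically, which is what lets the converted x509 code drop its error_free_* labels.
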
- */ -int create_signature(struct kernel_pkey_params *params, - const void *data, void *enc) -{ - params->op = kernel_pkey_sign; - return asymmetric_key_eds_op(params, data, enc); -} -EXPORT_SYMBOL_GPL(create_signature); - -/** * verify_signature - Initiate the use of an asymmetric key to verify a signature * @key: The asymmetric key to verify against * @sig: The signature to check diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index 2863984b6700..1f3b227ba7f2 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -40,13 +40,13 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, } while (0) chkaddr(0, 0, sizeof(*mz)); - if (mz->magic != MZ_MAGIC) + if (mz->magic != IMAGE_DOS_SIGNATURE) return -ELIBBAD; cursor = sizeof(*mz); chkaddr(cursor, mz->peaddr, sizeof(*pe)); pe = pebuf + mz->peaddr; - if (pe->magic != PE_MAGIC) + if (pe->magic != IMAGE_NT_SIGNATURE) return -ELIBBAD; cursor = mz->peaddr + sizeof(*pe); @@ -55,7 +55,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, pe64 = pebuf + cursor; switch (pe32->magic) { - case PE_OPT_MAGIC_PE32: + case IMAGE_NT_OPTIONAL_HDR32_MAGIC: chkaddr(0, cursor, sizeof(*pe32)); ctx->image_checksum_offset = (unsigned long)&pe32->csum - (unsigned long)pebuf; @@ -64,7 +64,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, ctx->n_data_dirents = pe32->data_dirs; break; - case PE_OPT_MAGIC_PE32PLUS: + case IMAGE_NT_OPTIONAL_HDR64_MAGIC: chkaddr(0, cursor, sizeof(*pe64)); ctx->image_checksum_offset = (unsigned long)&pe64->csum - (unsigned long)pebuf; diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 487204d39426..2ffe4ae90bea 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -60,24 +60,23 @@ EXPORT_SYMBOL_GPL(x509_free_certificate); */ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) { - struct x509_certificate *cert; - struct x509_parse_context *ctx; + struct x509_certificate *cert __free(x509_free_certificate); + struct x509_parse_context *ctx __free(kfree) = NULL; struct asymmetric_key_id *kid; long ret; - ret = -ENOMEM; cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL); if (!cert) - goto error_no_cert; + return ERR_PTR(-ENOMEM); cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL); if (!cert->pub) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL); if (!cert->sig) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL); if (!ctx) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); ctx->cert = cert; ctx->data = (unsigned long)data; @@ -85,7 +84,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) /* Attempt to decode the certificate */ ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); /* Decode the AuthorityKeyIdentifier */ if (ctx->raw_akid) { @@ -95,20 +94,19 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) ctx->raw_akid, ctx->raw_akid_size); if (ret < 0) { pr_warn("Couldn't decode AuthKeyIdentifier\n"); - goto error_decode; + return ERR_PTR(ret); } } - ret = -ENOMEM; cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); if (!cert->pub->key) - goto error_decode; + return ERR_PTR(-ENOMEM); cert->pub->keylen = 
ctx->key_size; cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL); if (!cert->pub->params) - goto error_decode; + return ERR_PTR(-ENOMEM); cert->pub->paramlen = ctx->params_size; cert->pub->algo = ctx->key_algo; @@ -116,33 +114,23 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) /* Grab the signature bits */ ret = x509_get_sig_params(cert); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); /* Generate cert issuer + serial number key ID */ kid = asymmetric_key_generate_id(cert->raw_serial, cert->raw_serial_size, cert->raw_issuer, cert->raw_issuer_size); - if (IS_ERR(kid)) { - ret = PTR_ERR(kid); - goto error_decode; - } + if (IS_ERR(kid)) + return ERR_CAST(kid); cert->id = kid; /* Detect self-signed certificates */ ret = x509_check_for_self_signed(cert); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); - kfree(ctx); - return cert; - -error_decode: - kfree(ctx); -error_no_ctx: - x509_free_certificate(cert); -error_no_cert: - return ERR_PTR(ret); + return_ptr(cert); } EXPORT_SYMBOL_GPL(x509_cert_parse); @@ -198,6 +186,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, default: return -ENOPKG; /* Unsupported combination */ + case OID_sha1WithRSAEncryption: + ctx->cert->sig->hash_algo = "sha1"; + goto rsa_pkcs1; + case OID_sha256WithRSAEncryption: ctx->cert->sig->hash_algo = "sha256"; goto rsa_pkcs1; @@ -214,6 +206,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; + case OID_id_ecdsa_with_sha1: + ctx->cert->sig->hash_algo = "sha1"; + goto ecdsa; + case OID_id_rsassa_pkcs1_v1_5_with_sha3_256: ctx->cert->sig->hash_algo = "sha3-256"; goto rsa_pkcs1; @@ -261,10 +257,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, case OID_gost2012Signature512: ctx->cert->sig->hash_algo = "streebog512"; goto ecrdsa; - - case OID_SM2_with_SM3: - ctx->cert->sig->hash_algo = "sm3"; - goto sm2; } rsa_pkcs1: @@ -277,11 +269,6 @@ ecrdsa: ctx->cert->sig->encoding = "raw"; ctx->sig_algo = ctx->last_oid; return 0; -sm2: - ctx->cert->sig->pkey_algo = "sm2"; - ctx->cert->sig->encoding = "raw"; - ctx->sig_algo = ctx->last_oid; - return 0; ecdsa: ctx->cert->sig->pkey_algo = "ecdsa"; ctx->cert->sig->encoding = "x962"; @@ -313,7 +300,6 @@ int x509_note_signature(void *context, size_t hdrlen, if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 || strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 || - strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 || strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) { /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) @@ -386,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen, /* Empty name string if no material */ if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) { - buffer = kmalloc(1, GFP_KERNEL); + buffer = kzalloc(1, GFP_KERNEL); if (!buffer) return -ENOMEM; - buffer[0] = 0; goto done; } @@ -518,17 +503,11 @@ int x509_extract_key_data(void *context, size_t hdrlen, case OID_gost2012PKey512: ctx->cert->pub->pkey_algo = "ecrdsa"; break; - case OID_sm2: - ctx->cert->pub->pkey_algo = "sm2"; - break; case OID_id_ecPublicKey: if (parse_OID(ctx->params, ctx->params_size, &oid) != 0) return -EBADMSG; switch (oid) { - case OID_sm2: - ctx->cert->pub->pkey_algo = "sm2"; - break; case OID_id_prime192v1: ctx->cert->pub->pkey_algo = "ecdsa-nist-p192"; break; @@ -538,6 +517,9 @@ int x509_extract_key_data(void *context, size_t hdrlen, case 
OID_id_ansip384r1: ctx->cert->pub->pkey_algo = "ecdsa-nist-p384"; break; + case OID_id_ansip521r1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p521"; + break; default: return -ENOPKG; } diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index 97a886cbe01c..0688c222806b 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -5,6 +5,7 @@ * Written by David Howells (dhowells@redhat.com) */ +#include <linux/cleanup.h> #include <linux/time.h> #include <crypto/public_key.h> #include <keys/asymmetric-type.h> @@ -44,6 +45,8 @@ struct x509_certificate { * x509_cert_parser.c */ extern void x509_free_certificate(struct x509_certificate *cert); +DEFINE_FREE(x509_free_certificate, struct x509_certificate *, + if (!IS_ERR(_T)) x509_free_certificate(_T)) extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); extern int x509_decode_time(time64_t *_t, size_t hdrlen, unsigned char tag, diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6a4f00be22fc..8409d7d36cb4 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -7,7 +7,6 @@ #define pr_fmt(fmt) "X.509: "fmt #include <crypto/hash.h> -#include <crypto/sm2.h> #include <keys/asymmetric-parser.h> #include <keys/asymmetric-subtype.h> #include <keys/system_keyring.h> @@ -64,20 +63,8 @@ int x509_get_sig_params(struct x509_certificate *cert) desc->tfm = tfm; - if (strcmp(cert->pub->pkey_algo, "sm2") == 0) { - ret = strcmp(sig->hash_algo, "sm3") != 0 ? -EINVAL : - crypto_shash_init(desc) ?: - sm2_compute_z_digest(desc, cert->pub->key, - cert->pub->keylen, sig->digest) ?: - crypto_shash_init(desc) ?: - crypto_shash_update(desc, sig->digest, - sig->digest_size) ?: - crypto_shash_finup(desc, cert->tbs, cert->tbs_size, - sig->digest); - } else { - ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, - sig->digest); - } + ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, + sig->digest); if (ret < 0) goto error_2; @@ -161,12 +148,11 @@ not_self_signed: */ static int x509_key_preparse(struct key_preparsed_payload *prep) { - struct asymmetric_key_ids *kids; - struct x509_certificate *cert; + struct x509_certificate *cert __free(x509_free_certificate); + struct asymmetric_key_ids *kids __free(kfree) = NULL; + char *p, *desc __free(kfree) = NULL; const char *q; size_t srlen, sulen; - char *desc = NULL, *p; - int ret; cert = x509_cert_parse(prep->data, prep->datalen); if (IS_ERR(cert)) @@ -188,9 +174,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) } /* Don't permit addition of blacklisted keys */ - ret = -EKEYREJECTED; if (cert->blacklisted) - goto error_free_cert; + return -EKEYREJECTED; /* Propose a description */ sulen = strlen(cert->subject); @@ -202,10 +187,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) q = cert->raw_serial; } - ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); if (!desc) - goto error_free_cert; + return -ENOMEM; p = memcpy(desc, cert->subject, sulen); p += sulen; *p++ = ':'; @@ -215,16 +199,14 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL); if (!kids) - goto error_free_desc; + return -ENOMEM; kids->id[0] = cert->id; kids->id[1] = cert->skid; kids->id[2] = asymmetric_key_generate_id(cert->raw_subject, cert->raw_subject_size, "", 0); - if (IS_ERR(kids->id[2])) { - ret = PTR_ERR(kids->id[2]); 
- goto error_free_kids; - } + if (IS_ERR(kids->id[2])) + return PTR_ERR(kids->id[2]); /* We're pinning the module by being linked against it */ __module_get(public_key_subtype.owner); @@ -242,15 +224,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) cert->sig = NULL; desc = NULL; kids = NULL; - ret = 0; - -error_free_kids: - kfree(kids); -error_free_desc: - kfree(desc); -error_free_cert: - x509_free_certificate(cert); - return ret; + return 0; } static struct asymmetric_key_parser x509_key_parser = { diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 1a3855284091..2c499654a36c 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset, } EXPORT_SYMBOL_GPL(async_xor_val_offs); -/** - * async_xor_val - attempt a xor parity check with a dma engine. - * @dest: destination page used if the xor is performed synchronously - * @src_list: array of source pages - * @offset: offset in pages to start transaction - * @src_cnt: number of source pages - * @len: length in bytes - * @result: 0 if sum == 0 else non-zero - * @submit: submission / completion modifiers - * - * honored flags: ASYNC_TX_ACK - * - * src_list note: if the dest is also a source it must be at index zero. - * The contents of this array will be overwritten if a scribble region - * is not specified. - */ -struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit) -{ - return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt, - len, result, submit); -} -EXPORT_SYMBOL_GPL(async_xor_val); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); MODULE_LICENSE("GPL"); diff --git a/crypto/authenc.c b/crypto/authenc.c index 3aaf3ab4e360..a723769c8777 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -9,7 +9,6 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> @@ -28,7 +27,6 @@ struct authenc_instance_ctx { struct crypto_authenc_ctx { struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; }; struct authenc_request_ctx { @@ -170,21 +168,6 @@ out: authenc_request_complete(areq, err); } -static int crypto_authenc_copy_assoc(struct aead_request *req) -{ - struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - - skcipher_request_set_sync_tfm(skreq, ctx->null); - skcipher_request_set_callback(skreq, aead_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, - NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int crypto_authenc_encrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); @@ -203,10 +186,7 @@ static int crypto_authenc_encrypt(struct aead_request *req) dst = src; if (req->src != req->dst) { - err = crypto_authenc_copy_assoc(req); - if (err) - return err; - + memcpy_sglist(req->dst, req->src, req->assoclen); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); } @@ -303,7 +283,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 
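
On the authenc encrypt-path change above: the removed ecb(cipher_null) "encryption" existed only to copy the associated data from the source scatterlist to the destination one. memcpy_sglist() performs that copy directly, even when the two lists split the bytes across pages differently. A minimal sketch with flat, hypothetical buffers:

struct scatterlist src_sg, dst_sg;
u8 assoc[16] = "associated data";	/* 15 bytes + NUL */
u8 copy[16];

sg_init_one(&src_sg, assoc, sizeof(assoc));
sg_init_one(&dst_sg, copy, sizeof(copy));
memcpy_sglist(&dst_sg, &src_sg, sizeof(assoc));	/* dst <- src, 16 bytes */
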
struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); @@ -315,14 +294,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_skcipher; - ctx->auth = auth; ctx->enc = enc; - ctx->null = null; crypto_aead_set_reqsize( tfm, @@ -336,8 +309,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) return 0; -err_free_skcipher: - crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -349,7 +320,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher(); } static void crypto_authenc_free(struct aead_instance *inst) @@ -451,7 +421,7 @@ static void __exit crypto_authenc_module_exit(void) crypto_unregister_template(&crypto_authenc_tmpl); } -subsys_initcall(crypto_authenc_module_init); +module_init(crypto_authenc_module_init); module_exit(crypto_authenc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 2cc933e2f790..d1bf0fda3f2e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -12,7 +12,6 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> @@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; }; struct authenc_esn_request_ctx { @@ -158,20 +156,6 @@ static void crypto_authenc_esn_encrypt_done(void *data, int err) authenc_esn_request_complete(areq, err); } -static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len) -{ - struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); - struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - - skcipher_request_set_sync_tfm(skreq, ctx->null); - skcipher_request_set_callback(skreq, aead_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int crypto_authenc_esn_encrypt(struct aead_request *req) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); @@ -190,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req) dst = src; if (req->src != req->dst) { - err = crypto_authenc_esn_copy(req, assoclen); - if (err) - return err; - + memcpy_sglist(req->dst, req->src, assoclen); sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); } @@ -277,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req) cryptlen -= authsize; - if (req->src != dst) { - err = crypto_authenc_esn_copy(req, assoclen + cryptlen); - if (err) - return err; - } + if (req->src != dst) + memcpy_sglist(dst, req->src, assoclen + cryptlen); scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen, authsize, 0); @@ -317,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); @@ -329,14 +306,8 @@ static int 
crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_skcipher; - ctx->auth = auth; ctx->enc = enc; - ctx->null = null; ctx->reqoff = 2 * crypto_ahash_digestsize(auth); @@ -352,8 +323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) return 0; -err_free_skcipher: - crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -365,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher(); } static void crypto_authenc_esn_free(struct aead_instance *inst) @@ -465,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void) crypto_unregister_template(&crypto_authenc_esn_tmpl); } -subsys_initcall(crypto_authenc_esn_module_init); +module_init(crypto_authenc_esn_module_init); module_exit(crypto_authenc_esn_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 32e380b714b6..60f056217510 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -15,12 +15,12 @@ * More information about BLAKE2 can be found at https://blake2.net. */ -#include <asm/unaligned.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/bitops.h> #include <crypto/internal/blake2b.h> #include <crypto/internal/hash.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> static const u8 blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, @@ -111,8 +111,8 @@ static void blake2b_compress_one_generic(struct blake2b_state *S, #undef G #undef ROUND -void blake2b_compress_generic(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc) +static void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc) { do { blake2b_increment_counter(state, inc); @@ -120,17 +120,19 @@ void blake2b_compress_generic(struct blake2b_state *state, block += BLAKE2B_BLOCK_SIZE; } while (--nblocks); } -EXPORT_SYMBOL(blake2b_compress_generic); static int crypto_blake2b_update_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen) { - return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); + return crypto_blake2b_update_bo(desc, in, inlen, + blake2b_compress_generic); } -static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) +static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in, + unsigned int inlen, u8 *out) { - return crypto_blake2b_final(desc, out, blake2b_compress_generic); + return crypto_blake2b_finup(desc, in, inlen, out, + blake2b_compress_generic); } #define BLAKE2B_ALG(name, driver_name, digest_size) \ @@ -138,7 +140,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) .base.cra_name = name, \ .base.cra_driver_name = driver_name, \ .base.cra_priority = 100, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \ + CRYPTO_AHASH_ALG_BLOCK_ONLY | \ + CRYPTO_AHASH_ALG_FINAL_NONZERO, \ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ .base.cra_module = THIS_MODULE, \ @@ -146,8 +150,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) .setkey = crypto_blake2b_setkey, \ .init = crypto_blake2b_init, \ .update = 
crypto_blake2b_update_generic, \ - .final = crypto_blake2b_final_generic, \ - .descsize = sizeof(struct blake2b_state), \ + .finup = crypto_blake2b_finup_generic, \ + .descsize = BLAKE2B_DESC_SIZE, \ + .statesize = BLAKE2B_STATE_SIZE, \ } static struct shash_alg blake2b_algs[] = { @@ -171,7 +176,7 @@ static void __exit blake2b_mod_fini(void) crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } -subsys_initcall(blake2b_mod_init); +module_init(blake2b_mod_init); module_exit(blake2b_mod_fini); MODULE_AUTHOR("David Sterba <kdave@kernel.org>"); diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index 0e74c7242e77..f3c5f9b09850 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -16,7 +16,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/types.h> #include <crypto/blowfish.h> @@ -124,7 +124,7 @@ static void __exit blowfish_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(blowfish_mod_init); +module_init(blowfish_mod_init); module_exit(blowfish_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c new file mode 100644 index 000000000000..a88798d3e8c8 --- /dev/null +++ b/crypto/bpf_crypto_skcipher.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Meta, Inc */ +#include <linux/types.h> +#include <linux/module.h> +#include <linux/bpf_crypto.h> +#include <crypto/skcipher.h> + +static void *bpf_crypto_lskcipher_alloc_tfm(const char *algo) +{ + return crypto_alloc_lskcipher(algo, 0, 0); +} + +static void bpf_crypto_lskcipher_free_tfm(void *tfm) +{ + crypto_free_lskcipher(tfm); +} + +static int bpf_crypto_lskcipher_has_algo(const char *algo) +{ + return crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_LSKCIPHER, CRYPTO_ALG_TYPE_MASK); +} + +static int bpf_crypto_lskcipher_setkey(void *tfm, const u8 *key, unsigned int keylen) +{ + return crypto_lskcipher_setkey(tfm, key, keylen); +} + +static u32 bpf_crypto_lskcipher_get_flags(void *tfm) +{ + return crypto_lskcipher_get_flags(tfm); +} + +static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm) +{ + return crypto_lskcipher_ivsize(tfm); +} + +static unsigned int bpf_crypto_lskcipher_statesize(void *tfm) +{ + return crypto_lskcipher_statesize(tfm); +} + +static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst, + unsigned int len, u8 *siv) +{ + return crypto_lskcipher_encrypt(tfm, src, dst, len, siv); +} + +static int bpf_crypto_lskcipher_decrypt(void *tfm, const u8 *src, u8 *dst, + unsigned int len, u8 *siv) +{ + return crypto_lskcipher_decrypt(tfm, src, dst, len, siv); +} + +static const struct bpf_crypto_type bpf_crypto_lskcipher_type = { + .alloc_tfm = bpf_crypto_lskcipher_alloc_tfm, + .free_tfm = bpf_crypto_lskcipher_free_tfm, + .has_algo = bpf_crypto_lskcipher_has_algo, + .setkey = bpf_crypto_lskcipher_setkey, + .encrypt = bpf_crypto_lskcipher_encrypt, + .decrypt = bpf_crypto_lskcipher_decrypt, + .ivsize = bpf_crypto_lskcipher_ivsize, + .statesize = bpf_crypto_lskcipher_statesize, + .get_flags = bpf_crypto_lskcipher_get_flags, + .owner = THIS_MODULE, + .name = "skcipher", +}; + +static int __init bpf_crypto_skcipher_init(void) +{ + return bpf_crypto_register_type(&bpf_crypto_lskcipher_type); +} + +static void __exit bpf_crypto_skcipher_exit(void) +{ + int err = bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type); + WARN_ON_ONCE(err); +} + +module_init(bpf_crypto_skcipher_init); 
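
On the ops table above: crypto_lskcipher is the synchronous, linear-buffer cipher interface, which is why every callback is a one-line forwarder. A sketch of the underlying call sequence; "cbc(aes)", the 16-byte key, and the assumption that cbc carries no extra state beyond the IV in siv are illustrative choices, and error handling is elided:

struct crypto_lskcipher *tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
u8 siv[16];				/* ivsize bytes, plus statesize if any */

crypto_lskcipher_setkey(tfm, key, 16);
memcpy(siv, iv, 16);			/* the IV (and state) travels in siv */
crypto_lskcipher_encrypt(tfm, src, dst, len, siv);
crypto_free_lskcipher(tfm);
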
+module_exit(bpf_crypto_skcipher_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Symmetric key cipher support for BPF"); diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index c04670cf51ac..ee4336a04b93 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bitops.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> static const u32 camellia_sp1110[256] = { 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00, @@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void) crypto_unregister_alg(&camellia_alg); } -subsys_initcall(camellia_init); +module_init(camellia_init); module_exit(camellia_fini); MODULE_DESCRIPTION("Camellia Cipher Algorithm"); diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 085a1eedae03..f68330793e0c 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -13,7 +13,7 @@ */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> @@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(cast5_mod_init); +module_init(cast5_mod_init); module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 34f1ab53e3a7..4c08c42646f0 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -10,7 +10,7 @@ */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> @@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(cast6_mod_init); +module_init(cast6_mod_init); module_exit(cast6_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/cast_common.c b/crypto/cast_common.c index 9b2f60fd4cef..fec1f6609a40 100644 --- a/crypto/cast_common.c +++ b/crypto/cast_common.c @@ -282,4 +282,5 @@ __visible const u32 cast_s4[256] = { }; EXPORT_SYMBOL_GPL(cast_s4); +MODULE_DESCRIPTION("Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6)"); MODULE_LICENSE("GPL"); diff --git a/crypto/cbc.c b/crypto/cbc.c index e81918ca68b7..ed3df6246765 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -179,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void) crypto_unregister_template(&crypto_cbc_tmpl); } -subsys_initcall(crypto_cbc_module_init); +module_init(crypto_cbc_module_init); module_exit(crypto_cbc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ccm.c b/crypto/ccm.c index 36f0acec32e1..2ae929ffdef8 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -10,11 +10,12 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string.h> struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; @@ -54,11 +55,6 @@ struct cbcmac_tfm_ctx { struct crypto_cipher *child; }; -struct cbcmac_desc_ctx { - unsigned int len; - u8 dg[]; -}; - static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( struct aead_request *req) { @@ -783,12 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) { - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_digestsize(pdesc->tfm); + u8 *dg = shash_desc_ctx(pdesc); - 
ctx->len = 0; - memset(ctx->dg, 0, bs); - + memset(dg, 0, bs); return 0; } @@ -797,39 +791,34 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - - while (len > 0) { - unsigned int l = min(len, bs - ctx->len); - - crypto_xor(&ctx->dg[ctx->len], p, l); - ctx->len +=l; - len -= l; - p += l; - - if (ctx->len == bs) { - crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); - ctx->len = 0; - } - } - - return 0; + u8 *dg = shash_desc_ctx(pdesc); + + do { + crypto_xor(dg, p, bs); + crypto_cipher_encrypt_one(tfm, dg, dg); + p += bs; + len -= bs; + } while (len >= bs); + return len; } -static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); + u8 *dg = shash_desc_ctx(pdesc); - if (ctx->len) - crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); - - memcpy(out, ctx->dg, bs); + if (len) { + crypto_xor(dg, src, len); + crypto_cipher_encrypt_one(tfm, out, dg); + return 0; + } + memcpy(out, dg, bs); return 0; } @@ -883,19 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = 1; + inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) + - alg->cra_blocksize; + inst->alg.descsize = alg->cra_blocksize; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY; inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx); inst->alg.base.cra_init = cbcmac_init_tfm; inst->alg.base.cra_exit = cbcmac_exit_tfm; inst->alg.init = crypto_cbcmac_digest_init; inst->alg.update = crypto_cbcmac_digest_update; - inst->alg.final = crypto_cbcmac_digest_final; + inst->alg.finup = crypto_cbcmac_digest_finup; inst->alg.setkey = crypto_cbcmac_digest_setkey; inst->free = shash_free_singlespawn_instance; @@ -940,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void) ARRAY_SIZE(crypto_ccm_tmpls)); } -subsys_initcall(crypto_ccm_module_init); +module_init(crypto_ccm_module_init); module_exit(crypto_ccm_module_exit); MODULE_LICENSE("GPL"); @@ -949,4 +938,4 @@ MODULE_ALIAS_CRYPTO("ccm_base"); MODULE_ALIAS_CRYPTO("rfc4309"); MODULE_ALIAS_CRYPTO("ccm"); MODULE_ALIAS_CRYPTO("cbcmac"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/chacha.c b/crypto/chacha.c new file mode 100644 index 000000000000..c3a11f4e2d13 --- /dev/null +++ b/crypto/chacha.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers + * + * Copyright (C) 2015 Martin Willi + * Copyright (C) 2018 Google LLC + */ + +#include <linux/unaligned.h> +#include <crypto/algapi.h> +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/module.h> + +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +static int chacha_setkey(struct crypto_skcipher *tfm, + const u8 *key, 
unsigned int keysize, int nrounds) +{ + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + int i; + + if (keysize != CHACHA_KEY_SIZE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx->key); i++) + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); + + ctx->nrounds = nrounds; + return 0; +} + +static int chacha20_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 20); +} + +static int chacha12_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 12); +} + +static int chacha_stream_xor(struct skcipher_request *req, + const struct chacha_ctx *ctx, + const u8 iv[CHACHA_IV_SIZE], bool arch) +{ + struct skcipher_walk walk; + struct chacha_state state; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + chacha_init(&state, ctx->key, iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE); + + if (arch) + chacha_crypt(&state, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, ctx->nrounds); + else + chacha_crypt_generic(&state, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, + ctx->nrounds); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + + return err; +} + +static int crypto_chacha_crypt_generic(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + + return chacha_stream_xor(req, ctx, req->iv, false); +} + +static int crypto_chacha_crypt_arch(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + + return chacha_stream_xor(req, ctx, req->iv, true); +} + +static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct chacha_ctx subctx; + struct chacha_state state; + u8 real_iv[16]; + + /* Compute the subkey given the original key and first 128 nonce bits */ + chacha_init(&state, ctx->key, req->iv); + if (arch) + hchacha_block(&state, subctx.key, ctx->nrounds); + else + hchacha_block_generic(&state, subctx.key, ctx->nrounds); + subctx.nrounds = ctx->nrounds; + + /* Build the real IV */ + memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */ + memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */ + + /* Generate the stream and XOR it with the data */ + return chacha_stream_xor(req, &subctx, real_iv, arch); +} + +static int crypto_xchacha_crypt_generic(struct skcipher_request *req) +{ + return crypto_xchacha_crypt(req, false); +} + +static int crypto_xchacha_crypt_arch(struct skcipher_request *req) +{ + return crypto_xchacha_crypt(req, true); +} + +static struct skcipher_alg algs[] = { + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_chacha_crypt_generic, + .decrypt = crypto_chacha_crypt_generic, + }, + { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 
1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_xchacha_crypt_generic, + .decrypt = crypto_xchacha_crypt_generic, + }, + { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha12_setkey, + .encrypt = crypto_xchacha_crypt_generic, + .decrypt = crypto_xchacha_crypt_generic, + }, + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_chacha_crypt_arch, + .decrypt = crypto_chacha_crypt_arch, + }, + { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_xchacha_crypt_arch, + .decrypt = crypto_xchacha_crypt_arch, + }, + { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha12_setkey, + .encrypt = crypto_xchacha_crypt_arch, + .decrypt = crypto_xchacha_crypt_arch, + } +}; + +static unsigned int num_algs; + +static int __init crypto_chacha_mod_init(void) +{ + /* register the arch flavours only if they differ from generic */ + num_algs = ARRAY_SIZE(algs); + BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0); + if (!chacha_is_arch_optimized()) + num_algs /= 2; + + return crypto_register_skciphers(algs, num_algs); +} + +static void __exit crypto_chacha_mod_fini(void) +{ + crypto_unregister_skciphers(algs, num_algs); +} + +module_init(crypto_chacha_mod_init); +module_exit(crypto_chacha_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-generic"); +MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH)); +MODULE_ALIAS_CRYPTO("xchacha20"); +MODULE_ALIAS_CRYPTO("xchacha20-generic"); +MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH)); +MODULE_ALIAS_CRYPTO("xchacha12"); +MODULE_ALIAS_CRYPTO("xchacha12-generic"); +MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH)); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 9e4651330852..b4b5a7198d84 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -12,34 +12,21 @@ #include 
<crypto/chacha.h> #include <crypto/poly1305.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> +#include <linux/string.h> struct chachapoly_instance_ctx { struct crypto_skcipher_spawn chacha; - struct crypto_ahash_spawn poly; unsigned int saltlen; }; struct chachapoly_ctx { struct crypto_skcipher *chacha; - struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; - u8 salt[]; -}; - -struct poly_req { - /* zero byte padding for AD/ciphertext, as needed */ - u8 pad[POLY1305_BLOCK_SIZE]; - /* tail data with AD/ciphertext lengths */ - struct { - __le64 assoclen; - __le64 cryptlen; - } tail; - struct scatterlist src[1]; - struct ahash_request req; /* must be last member */ + u8 salt[] __counted_by(saltlen); }; struct chacha_req { @@ -62,7 +49,6 @@ struct chachapoly_req_ctx { /* request flags, with MAY_SLEEP cleared if needed */ u32 flags; union { - struct poly_req poly; struct chacha_req chacha; } u; }; @@ -105,16 +91,6 @@ static int poly_verify_tag(struct aead_request *req) return 0; } -static int poly_copy_tag(struct aead_request *req) -{ - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - - scatterwalk_map_and_copy(rctx->tag, req->dst, - req->assoclen + rctx->cryptlen, - sizeof(rctx->tag), 1); - return 0; -} - static void chacha_decrypt_done(void *data, int err) { async_done_continue(data, err, poly_verify_tag); @@ -151,210 +127,76 @@ skip: return poly_verify_tag(req); } -static int poly_tail_continue(struct aead_request *req) -{ - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - - if (rctx->cryptlen == req->cryptlen) /* encrypting */ - return poly_copy_tag(req); - - return chacha_decrypt(req); -} - -static void poly_tail_done(void *data, int err) -{ - async_done_continue(data, err, poly_tail_continue); -} - -static int poly_tail(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; - - preq->tail.assoclen = cpu_to_le64(rctx->assoclen); - preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen); - sg_init_one(preq->src, &preq->tail, sizeof(preq->tail)); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_tail_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, - rctx->tag, sizeof(preq->tail)); - - err = crypto_ahash_finup(&preq->req); - if (err) - return err; - - return poly_tail_continue(req); -} - -static void poly_cipherpad_done(void *data, int err) -{ - async_done_continue(data, err, poly_tail); -} - -static int poly_cipherpad(struct aead_request *req) +static int poly_hash(struct aead_request *req) { - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; + const void *zp = page_address(ZERO_PAGE(0)); + struct scatterlist *sg = req->src; + struct poly1305_desc_ctx desc; + struct scatter_walk walk; + struct { + union { + struct { + __le64 assoclen; + __le64 cryptlen; + }; + u8 u8[16]; + }; + } tail; unsigned int padlen; - int err; - - padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; - memset(preq->pad, 0, sizeof(preq->pad)); - sg_init_one(preq->src, preq->pad, padlen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_cipherpad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - 
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); + unsigned int total; - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_tail(req); -} - -static void poly_cipher_done(void *data, int err) -{ - async_done_continue(data, err, poly_cipherpad); -} - -static int poly_cipher(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - struct scatterlist *crypt = req->src; - int err; + if (sg != req->dst) + memcpy_sglist(req->dst, sg, req->assoclen); if (rctx->cryptlen == req->cryptlen) /* encrypting */ - crypt = req->dst; - - crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_cipher_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; + sg = req->dst; - return poly_cipherpad(req); -} + poly1305_init(&desc, rctx->key); + scatterwalk_start(&walk, sg); -static void poly_adpad_done(void *data, int err) -{ - async_done_continue(data, err, poly_cipher); -} + total = rctx->assoclen; + while (total) { + unsigned int n = scatterwalk_next(&walk, total); -static int poly_adpad(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - unsigned int padlen; - int err; + poly1305_update(&desc, walk.addr, n); + scatterwalk_done_src(&walk, n); + total -= n; + } padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE; - memset(preq->pad, 0, sizeof(preq->pad)); - sg_init_one(preq->src, preq->pad, padlen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_adpad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_cipher(req); -} - -static void poly_ad_done(void *data, int err) -{ - async_done_continue(data, err, poly_adpad); -} - -static int poly_ad(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_ad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_adpad(req); -} - -static void poly_setkey_done(void *data, int err) -{ - async_done_continue(data, err, poly_ad); -} + poly1305_update(&desc, zp, padlen); -static int poly_setkey(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; + scatterwalk_skip(&walk, req->assoclen - rctx->assoclen); - sg_init_one(preq->src, rctx->key, sizeof(rctx->key)); + total = rctx->cryptlen; + while (total) { + unsigned int n = scatterwalk_next(&walk, total); - ahash_request_set_callback(&preq->req, rctx->flags, - poly_setkey_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, 
sizeof(rctx->key)); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_ad(req); -} - -static void poly_init_done(void *data, int err) -{ - async_done_continue(data, err, poly_setkey); -} + poly1305_update(&desc, walk.addr, n); + scatterwalk_done_src(&walk, n); + total -= n; + } -static int poly_init(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; + padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; + poly1305_update(&desc, zp, padlen); - ahash_request_set_callback(&preq->req, rctx->flags, - poly_init_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); + tail.assoclen = cpu_to_le64(rctx->assoclen); + tail.cryptlen = cpu_to_le64(rctx->cryptlen); + poly1305_update(&desc, tail.u8, sizeof(tail)); + memzero_explicit(&tail, sizeof(tail)); + poly1305_final(&desc, rctx->tag); - err = crypto_ahash_init(&preq->req); - if (err) - return err; + if (rctx->cryptlen != req->cryptlen) + return chacha_decrypt(req); - return poly_setkey(req); + memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag)); + return 0; } static void poly_genkey_done(void *data, int err) { - async_done_continue(data, err, poly_init); + async_done_continue(data, err, poly_hash); } static int poly_genkey(struct aead_request *req) @@ -388,7 +230,7 @@ static int poly_genkey(struct aead_request *req) if (err) return err; - return poly_init(req); + return poly_hash(req); } static void chacha_encrypt_done(void *data, int err) @@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req) /* encrypt call chain: * - chacha_encrypt/done() * - poly_genkey/done() - * - poly_init/done() - * - poly_setkey/done() - * - poly_ad/done() - * - poly_adpad/done() - * - poly_cipher/done() - * - poly_cipherpad/done() - * - poly_tail/done/continue() - * - poly_copy_tag() + * - poly_hash() */ return chacha_encrypt(req); } @@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req) /* decrypt call chain: * - poly_genkey/done() - * - poly_init/done() - * - poly_setkey/done() - * - poly_ad/done() - * - poly_adpad/done() - * - poly_cipher/done() - * - poly_cipherpad/done() - * - poly_tail/done/continue() + * - poly_hash() * - chacha_decrypt/done() * - poly_verify_tag() */ @@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm) struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_skcipher *chacha; - struct crypto_ahash *poly; unsigned long align; - poly = crypto_spawn_ahash(&ictx->poly); - if (IS_ERR(poly)) - return PTR_ERR(poly); - chacha = crypto_spawn_skcipher(&ictx->chacha); - if (IS_ERR(chacha)) { - crypto_free_ahash(poly); + if (IS_ERR(chacha)) return PTR_ERR(chacha); - } ctx->chacha = chacha; - ctx->poly = poly; ctx->saltlen = ictx->saltlen; align = crypto_aead_alignmask(tfm); @@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm) crypto_aead_set_reqsize( tfm, align + offsetof(struct chachapoly_req_ctx, u) + - max(offsetof(struct chacha_req, req) + - sizeof(struct skcipher_request) + - crypto_skcipher_reqsize(chacha), - offsetof(struct poly_req, req) + - sizeof(struct ahash_request) + - crypto_ahash_reqsize(poly))); + offsetof(struct chacha_req, req) + + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(chacha)); return 0; } @@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm) { 
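	/*
	 * Only the ChaCha20 skcipher remains to be dropped here: Poly1305
	 * is now computed synchronously in poly_hash() with an on-stack
	 * poly1305_desc_ctx, so there is no separate ahash tfm to free.
	 */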
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - crypto_free_ahash(ctx->poly); crypto_free_skcipher(ctx->chacha); } @@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst) struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_skcipher(&ctx->chacha); - crypto_drop_ahash(&ctx->poly); kfree(inst); } @@ -559,7 +375,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; struct skcipher_alg_common *chacha; - struct hash_alg_common *poly; int err; if (ivsize > CHACHAPOLY_IV_SIZE) @@ -581,14 +396,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha); - err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[2]), 0, mask); - if (err) - goto err_free_inst; - poly = crypto_spawn_ahash_alg(&ctx->poly); - err = -EINVAL; - if (poly->digestsize != POLY1305_DIGEST_SIZE) + if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") && + strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic")) goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ if (chacha->ivsize != CHACHA_IV_SIZE) @@ -599,16 +409,15 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->base.cra_name, - poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,poly1305)", name, + chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->base.cra_driver_name, - poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,poly1305-generic)", name, + chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_priority = (chacha->base.cra_priority + - poly->base.cra_priority) / 2; + inst->alg.base.cra_priority = chacha->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = chacha->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + @@ -667,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void) ARRAY_SIZE(rfc7539_tmpls)); } -subsys_initcall(chacha20poly1305_module_init); +module_init(chacha20poly1305_module_init); module_exit(chacha20poly1305_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c deleted file mode 100644 index 8beea79ab117..000000000000 --- a/crypto/chacha_generic.c +++ /dev/null @@ -1,139 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539) - * - * Copyright (C) 2015 Martin Willi - * Copyright (C) 2018 Google LLC - */ - -#include <asm/unaligned.h> -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/module.h> - -static int chacha_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init_generic(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE); - - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); - err 
= skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static int crypto_chacha_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_stream_xor(req, ctx, req->iv); -} - -static int crypto_xchacha_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - /* Compute the subkey given the original key and first 128 nonce bits */ - chacha_init_generic(state, ctx->key, req->iv); - hchacha_block_generic(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - /* Build the real IV */ - memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */ - memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */ - - /* Generate the stream and XOR it with the data */ - return chacha_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = crypto_chacha_crypt, - .decrypt = crypto_chacha_crypt, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = crypto_xchacha_crypt, - .decrypt = crypto_xchacha_crypt, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = crypto_xchacha_crypt, - .decrypt = crypto_xchacha_crypt, - } -}; - -static int __init chacha_generic_mod_init(void) -{ - return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -} - -static void __exit chacha_generic_mod_fini(void) -{ - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -subsys_initcall(chacha_generic_mod_init); -module_exit(chacha_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-generic"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-generic"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-generic"); diff --git a/crypto/cipher.c b/crypto/cipher.c index 47c77a3e5978..1fe62bf79656 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen); - memset(alignbuffer, 0, 
keylen); - kfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -54,7 +53,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm, return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen); } -EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL"); static inline void cipher_crypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src, bool enc) @@ -82,14 +81,14 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, { cipher_crypt_one(tfm, dst, src, true); } -EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, "CRYPTO_INTERNAL"); void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { cipher_crypt_one(tfm, dst, src, false); } -EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL); +EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, "CRYPTO_INTERNAL"); struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher) { diff --git a/crypto/cmac.c b/crypto/cmac.c index c7aa3665b076..1b03964abe00 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -13,9 +13,12 @@ #include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> +#include <crypto/utils.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> /* * +------------------------ @@ -31,22 +34,6 @@ struct cmac_tfm_ctx { __be64 consts[]; }; -/* - * +------------------------ - * | <shash desc> - * +------------------------ - * | cmac_desc_ctx - * +------------------------ - * | odds (block size) - * +------------------------ - * | prev (block size) - * +------------------------ - */ -struct cmac_desc_ctx { - unsigned int len; - u8 odds[]; -}; - static int crypto_cmac_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -102,13 +89,10 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent, static int crypto_cmac_digest_init(struct shash_desc *pdesc) { - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = &ctx->odds[bs]; + u8 *prev = shash_desc_ctx(pdesc); - ctx->len = 0; memset(prev, 0, bs); - return 0; } @@ -117,77 +101,36 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(odds + ctx->len, p, len); - ctx->len += len; - return 0; - } - - /* filling odds with new data and encrypting it */ - memcpy(odds + ctx->len, p, bs - ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - crypto_xor(prev, odds, bs); - crypto_cipher_encrypt_one(tfm, prev, prev); + u8 *prev = shash_desc_ctx(pdesc); - /* clearing the length */ - ctx->len = 0; - - /* encrypting the rest of data */ - while (len > bs) { + do { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(odds, p, len); - ctx->len = len; - } - - return 0; + } while (len >= bs); + return len; } -static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src, + 
unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; + u8 *prev = shash_desc_ctx(pdesc); unsigned int offset = 0; - if (ctx->len != bs) { - unsigned int rlen; - u8 *p = odds + ctx->len; - - *p = 0x80; - p++; - - rlen = bs - ctx->len - 1; - if (rlen) - memset(p, 0, rlen); - + crypto_xor(prev, src, len); + if (len != bs) { + prev[len] ^= 0x80; offset += bs; } - - crypto_xor(prev, odds, bs); crypto_xor(prev, (const u8 *)tctx->consts + offset, bs); - crypto_cipher_encrypt_one(tfm, out, prev); - return 0; } @@ -269,13 +212,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) + alg->cra_blocksize * 2; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = sizeof(struct cmac_desc_ctx) + - alg->cra_blocksize * 2; + inst->alg.descsize = alg->cra_blocksize; inst->alg.init = crypto_cmac_digest_init; inst->alg.update = crypto_cmac_digest_update; - inst->alg.final = crypto_cmac_digest_final; + inst->alg.finup = crypto_cmac_digest_finup; inst->alg.setkey = crypto_cmac_digest_setkey; inst->alg.init_tfm = cmac_init_tfm; inst->alg.clone_tfm = cmac_clone_tfm; @@ -307,10 +251,10 @@ static void __exit crypto_cmac_module_exit(void) crypto_unregister_template(&crypto_cmac_tmpl); } -subsys_initcall(crypto_cmac_module_init); +module_init(crypto_cmac_module_init); module_exit(crypto_cmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CMAC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("cmac"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/compress.c b/crypto/compress.c deleted file mode 100644 index 9048fe390c46..000000000000 --- a/crypto/compress.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * Compression operations. 
- * - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - */ -#include <linux/crypto.h> -#include "internal.h" - -int crypto_comp_compress(struct crypto_comp *comp, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_tfm *tfm = crypto_comp_tfm(comp); - - return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, - dlen); -} -EXPORT_SYMBOL_GPL(crypto_comp_compress); - -int crypto_comp_decompress(struct crypto_comp *comp, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_tfm *tfm = crypto_comp_tfm(comp); - - return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, - dlen); -} -EXPORT_SYMBOL_GPL(crypto_comp_decompress); diff --git a/crypto/compress.h b/crypto/compress.h index 19f65516d699..f7737a1fcbbd 100644 --- a/crypto/compress.h +++ b/crypto/compress.h @@ -13,13 +13,8 @@ struct acomp_req; struct comp_alg_common; -struct sk_buff; int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); -struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); -void crypto_acomp_scomp_free_ctx(struct acomp_req *req); - -int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg); void comp_prepare_alg(struct comp_alg_common *alg); diff --git a/crypto/crc32.c b/crypto/crc32.c new file mode 100644 index 000000000000..cc371d42601f --- /dev/null +++ b/crypto/crc32.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2012 Xyratex Technology Limited + */ + +/* + * This is crypto api shash wrappers to crc32_le. + */ + +#include <linux/unaligned.h> +#include <linux/crc32.h> +#include <crypto/internal/hash.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/kernel.h> + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +/** No default init with ~0 */ +static int crc32_cra_init(struct crypto_tfm *tfm) +{ + u32 *key = crypto_tfm_ctx(tfm); + + *key = 0; + + return 0; +} + +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. 
+ */ +static int crc32_setkey(struct crypto_shash *hash, const u8 *key, + unsigned int keylen) +{ + u32 *mctx = crypto_shash_ctx(hash); + + if (keylen != sizeof(u32)) + return -EINVAL; + *mctx = get_unaligned_le32(key); + return 0; +} + +static int crc32_init(struct shash_desc *desc) +{ + u32 *mctx = crypto_shash_ctx(desc->tfm); + u32 *crcp = shash_desc_ctx(desc); + + *crcp = *mctx; + + return 0; +} + +static int crc32_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + u32 *crcp = shash_desc_ctx(desc); + + *crcp = crc32_le_base(*crcp, data, len); + return 0; +} + +static int crc32_update_arch(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + u32 *crcp = shash_desc_ctx(desc); + + *crcp = crc32_le(*crcp, data, len); + return 0; +} + +/* No final XOR 0xFFFFFFFF, like crc32_le */ +static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + put_unaligned_le32(crc32_le_base(*crcp, data, len), out); + return 0; +} + +static int __crc32_finup_arch(u32 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + put_unaligned_le32(crc32_le(*crcp, data, len), out); + return 0; +} + +static int crc32_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32_finup(shash_desc_ctx(desc), data, len, out); +} + +static int crc32_finup_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32_finup_arch(shash_desc_ctx(desc), data, len, out); +} + +static int crc32_final(struct shash_desc *desc, u8 *out) +{ + u32 *crcp = shash_desc_ctx(desc); + + put_unaligned_le32(*crcp, out); + return 0; +} + +static int crc32_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, out); +} + +static int crc32_digest_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32_finup_arch(crypto_shash_ctx(desc->tfm), data, len, out); +} + +static struct shash_alg algs[] = {{ + .setkey = crc32_setkey, + .init = crc32_init, + .update = crc32_update, + .final = crc32_final, + .finup = crc32_finup, + .digest = crc32_digest, + .descsize = sizeof(u32), + .digestsize = CHKSUM_DIGEST_SIZE, + + .base.cra_name = "crc32", + .base.cra_driver_name = "crc32-generic", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, + .base.cra_blocksize = CHKSUM_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(u32), + .base.cra_module = THIS_MODULE, + .base.cra_init = crc32_cra_init, +}, { + .setkey = crc32_setkey, + .init = crc32_init, + .update = crc32_update_arch, + .final = crc32_final, + .finup = crc32_finup_arch, + .digest = crc32_digest_arch, + .descsize = sizeof(u32), + .digestsize = CHKSUM_DIGEST_SIZE, + + .base.cra_name = "crc32", + .base.cra_driver_name = "crc32-" __stringify(ARCH), + .base.cra_priority = 150, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, + .base.cra_blocksize = CHKSUM_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(u32), + .base.cra_module = THIS_MODULE, + .base.cra_init = crc32_cra_init, +}}; + +static int num_algs; + +static int __init crc32_mod_init(void) +{ + /* register the arch flavor only if it differs from the generic one */ + num_algs = 1 + ((crc32_optimizations() & CRC32_LE_OPTIMIZATION) != 0); + + return crypto_register_shashes(algs, num_algs); +} + +static void __exit crc32_mod_fini(void) +{ + crypto_unregister_shashes(algs, num_algs); +} + +module_init(crc32_mod_init); +module_exit(crc32_mod_fini); + +MODULE_AUTHOR("Alexander Boyko 
<alexander_boyko@xyratex.com>"); +MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CRYPTO("crc32"); +MODULE_ALIAS_CRYPTO("crc32-generic"); diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c deleted file mode 100644 index a989cb44fd16..000000000000 --- a/crypto/crc32_generic.c +++ /dev/null @@ -1,132 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2012 Xyratex Technology Limited - */ - -/* - * This is crypto api shash wrappers to crc32_le. - */ - -#include <asm/unaligned.h> -#include <linux/crc32.h> -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/kernel.h> - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -/** No default init with ~0 */ -static int crc32_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = 0; - - return 0; -} - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. - */ -static int crc32_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) - return -EINVAL; - *mctx = get_unaligned_le32(key); - return 0; -} - -static int crc32_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *crcp = shash_desc_ctx(desc); - - *crcp = *mctx; - - return 0; -} - -static int crc32_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *crcp = shash_desc_ctx(desc); - - *crcp = crc32_le(*crcp, data, len); - return 0; -} - -/* No final XOR 0xFFFFFFFF, like crc32_le */ -static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len, - u8 *out) -{ - put_unaligned_le32(crc32_le(*crcp, data, len), out); - return 0; -} - -static int crc32_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32_finup(shash_desc_ctx(desc), data, len, out); -} - -static int crc32_final(struct shash_desc *desc, u8 *out) -{ - u32 *crcp = shash_desc_ctx(desc); - - put_unaligned_le32(*crcp, out); - return 0; -} - -static int crc32_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} -static struct shash_alg alg = { - .setkey = crc32_setkey, - .init = crc32_init, - .update = crc32_update, - .final = crc32_final, - .finup = crc32_finup, - .digest = crc32_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32", - .cra_driver_name = "crc32-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = crc32_cra_init, - } -}; - -static int __init crc32_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit crc32_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(crc32_mod_init); -module_exit(crc32_mod_fini); - -MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); -MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crc32"); -MODULE_ALIAS_CRYPTO("crc32-generic"); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c index 768614738541..e5377898414a 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c.c @@ -30,7 +30,7 @@ * Copyright (c) 2008 Herbert Xu 
<herbert@gondor.apana.org.au> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> @@ -85,7 +85,16 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - ctx->crc = __crc32c_le(ctx->crc, data, length); + ctx->crc = crc32c_base(ctx->crc, data, length); + return 0; +} + +static int chksum_update_arch(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc32c(ctx->crc, data, length); return 0; } @@ -99,7 +108,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { - put_unaligned_le32(~__crc32c_le(*crcp, data, len), out); + put_unaligned_le32(~crc32c_base(*crcp, data, len), out); + return 0; +} + +static int __chksum_finup_arch(u32 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + put_unaligned_le32(~crc32c(*crcp, data, len), out); return 0; } @@ -111,6 +127,14 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data, return __chksum_finup(&ctx->crc, data, len, out); } +static int chksum_finup_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup_arch(&ctx->crc, data, len, out); +} + static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { @@ -119,6 +143,14 @@ static int chksum_digest(struct shash_desc *desc, const u8 *data, return __chksum_finup(&mctx->key, data, length, out); } +static int chksum_digest_arch(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + + return __chksum_finup_arch(&mctx->key, data, length, out); +} + static int crc32c_cra_init(struct crypto_tfm *tfm) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); @@ -127,38 +159,60 @@ static int crc32c_cra_init(struct crypto_tfm *tfm) return 0; } -static struct shash_alg alg = { - .digestsize = CHKSUM_DIGEST_SIZE, - .setkey = chksum_setkey, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct chksum_ctx), - .cra_module = THIS_MODULE, - .cra_init = crc32c_cra_init, - } -}; +static struct shash_alg algs[] = {{ + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + + .base.cra_name = "crc32c", + .base.cra_driver_name = "crc32c-generic", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, + .base.cra_blocksize = CHKSUM_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct chksum_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_init = crc32c_cra_init, +}, { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update_arch, + .final = chksum_final, + .finup = chksum_finup_arch, + .digest = chksum_digest_arch, + .descsize = sizeof(struct chksum_desc_ctx), + + .base.cra_name = "crc32c", + 
.base.cra_driver_name = "crc32c-" __stringify(ARCH), + .base.cra_priority = 150, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, + .base.cra_blocksize = CHKSUM_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct chksum_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_init = crc32c_cra_init, +}}; + +static int num_algs; static int __init crc32c_mod_init(void) { - return crypto_register_shash(&alg); + /* register the arch flavor only if it differs from the generic one */ + num_algs = 1 + ((crc32_optimizations() & CRC32C_OPTIMIZATION) != 0); + + return crypto_register_shashes(algs, num_algs); } static void __exit crc32c_mod_fini(void) { - crypto_unregister_shash(&alg); + crypto_unregister_shashes(algs, num_algs); } -subsys_initcall(crc32c_mod_init); +module_init(crc32c_mod_init); module_exit(crc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c deleted file mode 100644 index 9e812bb26dba..000000000000 --- a/crypto/crc64_rocksoft_generic.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - -#include <linux/crc64.h> -#include <linux/module.h> -#include <crypto/internal/hash.h> -#include <asm/unaligned.h> - -static int chksum_init(struct shash_desc *desc) -{ - u64 *crc = shash_desc_ctx(desc); - - *crc = 0; - - return 0; -} - -static int chksum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u64 *crc = shash_desc_ctx(desc); - - *crc = crc64_rocksoft_generic(*crc, data, length); - - return 0; -} - -static int chksum_final(struct shash_desc *desc, u8 *out) -{ - u64 *crc = shash_desc_ctx(desc); - - put_unaligned_le64(*crc, out); - return 0; -} - -static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out) -{ - crc = crc64_rocksoft_generic(crc, data, len); - put_unaligned_le64(crc, out); - return 0; -} - -static int chksum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - u64 *crc = shash_desc_ctx(desc); - - return __chksum_finup(*crc, data, len, out); -} - -static int chksum_digest(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - return __chksum_finup(0, data, length, out); -} - -static struct shash_alg alg = { - .digestsize = sizeof(u64), - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(u64), - .base = { - .cra_name = CRC64_ROCKSOFT_STRING, - .cra_driver_name = "crc64-rocksoft-generic", - .cra_priority = 200, - .cra_blocksize = 1, - .cra_module = THIS_MODULE, - } -}; - -static int __init crc64_rocksoft_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit crc64_rocksoft_exit(void) -{ - crypto_unregister_shash(&alg); -} - -module_init(crc64_rocksoft_init); -module_exit(crc64_rocksoft_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Rocksoft model CRC64 calculation."); -MODULE_ALIAS_CRYPTO("crc64-rocksoft"); -MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic"); diff --git a/crypto/crct10dif_common.c b/crypto/crct10dif_common.c deleted file mode 100644 index b2fab366f518..000000000000 --- a/crypto/crct10dif_common.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Cryptographic API. - * - * T10 Data Integrity Field CRC16 Crypto Transform - * - * Copyright (c) 2007 Oracle Corporation. All rights reserved. - * Written by Martin K. 
Petersen <martin.petersen@oracle.com> - * Copyright (C) 2013 Intel Corporation - * Author: Tim Chen <tim.c.chen@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include <linux/crc-t10dif.h> -#include <linux/module.h> -#include <linux/kernel.h> - -/* Table generated using the following polynomium: - * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 - * gt: 0x8bb7 - */ -static const __u16 t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; - -__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len) -{ - unsigned int i; - - for (i = 0 ; i < len ; i++) - crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; - - return crc; -} -EXPORT_SYMBOL(crc_t10dif_generic); - 
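/*
 * Aside (assumption, not stated in the patch): with the table and its
 * shash wrapper removed, callers presumably use the CRC library in
 * <linux/crc-t10dif.h> directly. A minimal sketch of that interface;
 * the function name is hypothetical:
 */
#include <linux/crc-t10dif.h>

static __u16 __maybe_unused t10dif_of_buffer(const u8 *buf, size_t len)
{
	__u16 crc = crc_t10dif(buf, len);	/* one-shot over the buffer */

	/* or incrementally, seeded with 0 exactly as chksum_init() did */
	crc = crc_t10dif_update(0, buf, len);
	return crc;
}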
-MODULE_DESCRIPTION("T10 DIF CRC calculation common code"); -MODULE_LICENSE("GPL"); diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c deleted file mode 100644 index e843982073bb..000000000000 --- a/crypto/crct10dif_generic.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Cryptographic API. - * - * T10 Data Integrity Field CRC16 Crypto Transform - * - * Copyright (c) 2007 Oracle Corporation. All rights reserved. - * Written by Martin K. Petersen <martin.petersen@oracle.com> - * Copyright (C) 2013 Intel Corporation - * Author: Tim Chen <tim.c.chen@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include <linux/module.h> -#include <linux/crc-t10dif.h> -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/kernel.h> - -struct chksum_desc_ctx { - __u16 crc; -}; - -/* - * Steps through buffer one byte at a time, calculates reflected - * crc using table. - */ - -static int chksum_init(struct shash_desc *desc) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = 0; - - return 0; -} - -static int chksum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = crc_t10dif_generic(ctx->crc, data, length); - return 0; -} - -static int chksum_final(struct shash_desc *desc, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - *(__u16 *)out = ctx->crc; - return 0; -} - -static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) -{ - *(__u16 *)out = crc_t10dif_generic(crc, data, len); - return 0; -} - -static int chksum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(ctx->crc, data, len, out); -} - -static int chksum_digest(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - return __chksum_finup(0, data, length, out); -} - -static struct shash_alg alg = { - .digestsize = CRC_T10DIF_DIGEST_SIZE, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crct10dif", - .cra_driver_name = "crct10dif-generic", - .cra_priority = 100, - .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init crct10dif_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit crct10dif_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -subsys_initcall(crct10dif_mod_init); -module_exit(crct10dif_mod_fini); - -MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); -MODULE_DESCRIPTION("T10 DIF CRC calculation."); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crct10dif"); 
-MODULE_ALIAS_CRYPTO("crct10dif-generic"); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 31d022d47f7a..5bb6f8d88cc2 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -1138,7 +1138,7 @@ static void __exit cryptd_exit(void) crypto_unregister_template(&cryptd_tmpl); } -subsys_initcall(cryptd_init); +module_init(cryptd_init); module_exit(cryptd_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index e60a0eb628e8..445d3c113ee1 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -23,9 +23,6 @@ #define CRYPTO_ENGINE_MAX_QLEN 10 -/* Temporary algorithm flag used to indicate an updated driver. */ -#define CRYPTO_ALG_ENGINE 0x200 - struct crypto_engine_alg { struct crypto_alg base; struct crypto_engine_op op; @@ -148,16 +145,9 @@ start_request: } } - if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) { - alg = container_of(async_req->tfm->__crt_alg, - struct crypto_engine_alg, base); - op = &alg->op; - } else { - dev_err(engine->dev, "failed to do request\n"); - ret = -EINVAL; - goto req_err_1; - } - + alg = container_of(async_req->tfm->__crt_alg, + struct crypto_engine_alg, base); + op = &alg->op; ret = op->do_one_request(engine, async_req); /* Request unsuccessfully executed by hardware */ @@ -517,7 +507,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, crypto_init_queue(&engine->queue, qlen); spin_lock_init(&engine->queue_lock); - engine->kworker = kthread_create_worker(0, "%s", engine->name); + engine->kworker = kthread_run_worker(0, "%s", engine->name); if (IS_ERR(engine->kworker)) { dev_err(dev, "failed to create crypto request pump task\n"); return NULL; @@ -569,9 +559,6 @@ int crypto_engine_register_aead(struct aead_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_aead(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_aead); @@ -614,9 +601,6 @@ int crypto_engine_register_ahash(struct ahash_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_ahash(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_ahash); @@ -660,9 +644,6 @@ int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_akcipher(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher); @@ -677,9 +658,6 @@ int crypto_engine_register_kpp(struct kpp_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_kpp(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_kpp); @@ -694,9 +672,6 @@ int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_skcipher(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 5b84b0f7cc17..34588f39fdfc 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -15,25 +15,11 @@ #include <crypto/null.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/mm.h> #include <linux/string.h> -static DEFINE_MUTEX(crypto_default_null_skcipher_lock); -static 
struct crypto_sync_skcipher *crypto_default_null_skcipher; -static int crypto_default_null_skcipher_refcnt; - -static int null_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - if (slen > *dlen) - return -EINVAL; - memcpy(dst, src, slen); - *dlen = slen; - return 0; -} - static int null_init(struct shash_desc *desc) { return 0; @@ -75,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) static int null_skcipher_crypt(struct skcipher_request *req) { - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - if (walk.src.virt.addr != walk.dst.virt.addr) - memcpy(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - return err; + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, req->cryptlen); + return 0; } static struct shash_alg digest_null = { @@ -121,7 +97,7 @@ static struct skcipher_alg skcipher_null = { .decrypt = null_skcipher_crypt, }; -static struct crypto_alg null_algs[] = { { +static struct crypto_alg cipher_null = { .cra_name = "cipher_null", .cra_driver_name = "cipher_null-generic", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, @@ -134,62 +110,16 @@ static struct crypto_alg null_algs[] = { { .cia_setkey = null_setkey, .cia_encrypt = null_crypt, .cia_decrypt = null_crypt } } -}, { - .cra_name = "compress_null", - .cra_driver_name = "compress_null-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_blocksize = NULL_BLOCK_SIZE, - .cra_ctxsize = 0, - .cra_module = THIS_MODULE, - .cra_u = { .compress = { - .coa_compress = null_compress, - .coa_decompress = null_compress } } -} }; +}; -MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); -struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void) -{ - struct crypto_sync_skcipher *tfm; - - mutex_lock(&crypto_default_null_skcipher_lock); - tfm = crypto_default_null_skcipher; - - if (!tfm) { - tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); - if (IS_ERR(tfm)) - goto unlock; - - crypto_default_null_skcipher = tfm; - } - - crypto_default_null_skcipher_refcnt++; - -unlock: - mutex_unlock(&crypto_default_null_skcipher_lock); - - return tfm; -} -EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); - -void crypto_put_default_null_skcipher(void) -{ - mutex_lock(&crypto_default_null_skcipher_lock); - if (!--crypto_default_null_skcipher_refcnt) { - crypto_free_sync_skcipher(crypto_default_null_skcipher); - crypto_default_null_skcipher = NULL; - } - mutex_unlock(&crypto_default_null_skcipher_lock); -} -EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); - static int __init crypto_null_mod_init(void) { int ret = 0; - ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs)); + ret = crypto_register_alg(&cipher_null); if (ret < 0) goto out; @@ -206,19 +136,19 @@ static int __init crypto_null_mod_init(void) out_unregister_shash: crypto_unregister_shash(&digest_null); out_unregister_algs: - crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); + crypto_unregister_alg(&cipher_null); out: return ret; } static void __exit crypto_null_mod_fini(void) { - crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); + crypto_unregister_alg(&cipher_null); crypto_unregister_shash(&digest_null); crypto_unregister_skcipher(&skcipher_null); } -subsys_initcall(crypto_null_mod_init); +module_init(crypto_null_mod_init); module_exit(crypto_null_mod_fini); MODULE_LICENSE("GPL"); diff --git 
a/crypto/crypto_user_base.c b/crypto/crypto_user.c index 3fa20f12989f..aad429bef03e 100644 --- a/crypto/crypto_user_base.c +++ b/crypto/crypto_user.c @@ -18,7 +18,6 @@ #include <crypto/internal/rng.h> #include <crypto/akcipher.h> #include <crypto/kpp.h> -#include <crypto/internal/cryptouser.h> #include "internal.h" @@ -33,7 +32,7 @@ struct crypto_dump_info { u16 nlmsg_flags; }; -struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) +static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) { struct crypto_alg *q, *alg = NULL; @@ -85,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) sizeof(rcipher), &rcipher); } -static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_report_comp rcomp; - - memset(&rcomp, 0, sizeof(rcomp)); - - strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - - return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp); -} - static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { @@ -137,11 +125,6 @@ static int crypto_report_one(struct crypto_alg *alg, goto nla_put_failure; break; - case CRYPTO_ALG_TYPE_COMPRESS: - if (crypto_report_comp(skb, alg)) - goto nla_put_failure; - - break; } out: @@ -387,6 +370,13 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh, return crypto_del_default_rng(); } +static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, + struct nlattr **attrs) +{ + /* No longer supported */ + return -ENOTSUPP; +} + #define MSGSIZE(type) sizeof(struct type) static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c deleted file mode 100644 index d4f3d39b5137..000000000000 --- a/crypto/crypto_user_stat.c +++ /dev/null @@ -1,176 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Crypto user configuration API. 
- * - * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com> - * - */ - -#include <crypto/algapi.h> -#include <crypto/internal/cryptouser.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/string.h> -#include <net/netlink.h> -#include <net/sock.h> - -#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) - -struct crypto_dump_info { - struct sk_buff *in_skb; - struct sk_buff *out_skb; - u32 nlmsg_seq; - u16 nlmsg_flags; -}; - -static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_cipher rcipher; - - memset(&rcipher, 0, sizeof(rcipher)); - - strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); - - return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); -} - -static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_stat_compress rcomp; - - memset(&rcomp, 0, sizeof(rcomp)); - - strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - - return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); -} - -static int crypto_reportstat_one(struct crypto_alg *alg, - struct crypto_user_alg *ualg, - struct sk_buff *skb) -{ - memset(ualg, 0, sizeof(*ualg)); - - strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); - strscpy(ualg->cru_driver_name, alg->cra_driver_name, - sizeof(ualg->cru_driver_name)); - strscpy(ualg->cru_module_name, module_name(alg->cra_module), - sizeof(ualg->cru_module_name)); - - ualg->cru_type = 0; - ualg->cru_mask = 0; - ualg->cru_flags = alg->cra_flags; - ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); - - if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) - goto nla_put_failure; - if (alg->cra_flags & CRYPTO_ALG_LARVAL) { - struct crypto_stat_larval rl; - - memset(&rl, 0, sizeof(rl)); - strscpy(rl.type, "larval", sizeof(rl.type)); - if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl)) - goto nla_put_failure; - goto out; - } - - if (alg->cra_type && alg->cra_type->report_stat) { - if (alg->cra_type->report_stat(skb, alg)) - goto nla_put_failure; - goto out; - } - - switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { - case CRYPTO_ALG_TYPE_CIPHER: - if (crypto_report_cipher(skb, alg)) - goto nla_put_failure; - break; - case CRYPTO_ALG_TYPE_COMPRESS: - if (crypto_report_comp(skb, alg)) - goto nla_put_failure; - break; - default: - pr_err("ERROR: Unhandled alg %d in %s\n", - alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL), - __func__); - } - -out: - return 0; - -nla_put_failure: - return -EMSGSIZE; -} - -static int crypto_reportstat_alg(struct crypto_alg *alg, - struct crypto_dump_info *info) -{ - struct sk_buff *in_skb = info->in_skb; - struct sk_buff *skb = info->out_skb; - struct nlmsghdr *nlh; - struct crypto_user_alg *ualg; - int err = 0; - - nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, - CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags); - if (!nlh) { - err = -EMSGSIZE; - goto out; - } - - ualg = nlmsg_data(nlh); - - err = crypto_reportstat_one(alg, ualg, skb); - if (err) { - nlmsg_cancel(skb, nlh); - goto out; - } - - nlmsg_end(skb, nlh); - -out: - return err; -} - -int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, - struct nlattr **attrs) -{ - struct net *net = sock_net(in_skb->sk); - struct crypto_user_alg *p = nlmsg_data(in_nlh); - struct crypto_alg *alg; - struct sk_buff *skb; - struct crypto_dump_info info; - int err; - - if (!null_terminated(p->cru_name) || 
!null_terminated(p->cru_driver_name)) - return -EINVAL; - - alg = crypto_alg_match(p, 0); - if (!alg) - return -ENOENT; - - err = -ENOMEM; - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); - if (!skb) - goto drop_alg; - - info.in_skb = in_skb; - info.out_skb = skb; - info.nlmsg_seq = in_nlh->nlmsg_seq; - info.nlmsg_flags = 0; - - err = crypto_reportstat_alg(alg, &info); - -drop_alg: - crypto_mod_put(alg); - - if (err) { - kfree_skb(skb); - return err; - } - - return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); -} - -MODULE_LICENSE("GPL"); diff --git a/crypto/ctr.c b/crypto/ctr.c index 1420496062d5..a388f0ceb3a0 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk, u8 *ctrblk = walk->iv; u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk, crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); u8 *ctrblk = walk->iv; - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk, unsigned int bsize = crypto_cipher_blocksize(tfm); unsigned long alignmask = crypto_cipher_alignmask(tfm); unsigned int nbytes = walk->nbytes; + u8 *dst = walk->dst.virt.addr; u8 *ctrblk = walk->iv; - u8 *src = walk->src.virt.addr; u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - crypto_xor(src, keystream, bsize); + crypto_xor(dst, keystream, bsize); /* increment counter in counterblock */ crypto_inc(ctrblk, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; @@ -350,11 +350,11 @@ static void __exit crypto_ctr_module_exit(void) ARRAY_SIZE(crypto_ctr_tmpls)); } -subsys_initcall(crypto_ctr_module_init); +module_init(crypto_ctr_module_init); module_exit(crypto_ctr_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CTR block cipher mode of operation"); MODULE_ALIAS_CRYPTO("rfc3686"); MODULE_ALIAS_CRYPTO("ctr"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/cts.c b/crypto/cts.c index f5b42156b6c7..48898d5e24ff 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void) crypto_unregister_template(&crypto_cts_tmpl); } -subsys_initcall(crypto_cts_module_init); +module_init(crypto_cts_module_init); module_exit(crypto_cts_module_exit); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c index d055b0784c77..f3e56e73c66c 100644 --- a/crypto/curve25519-generic.c +++ b/crypto/curve25519-generic.c @@ -82,9 +82,10 @@ static void __exit curve25519_exit(void) crypto_unregister_kpp(&curve25519_alg); } -subsys_initcall(curve25519_init); +module_init(curve25519_init); module_exit(curve25519_exit); MODULE_ALIAS_CRYPTO("curve25519"); MODULE_ALIAS_CRYPTO("curve25519-generic"); +MODULE_DESCRIPTION("Curve25519 elliptic curve (RFC7748)"); MODULE_LICENSE("GPL"); diff --git a/crypto/deflate.c b/crypto/deflate.c index 6e31e0db0e86..fe8e4ad0fee1 100644 --- a/crypto/deflate.c 
+++ b/crypto/deflate.c @@ -6,308 +6,250 @@ * by IPCOMP (RFC 3173 & RFC 2394). * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> - * - * FIXME: deflate transforms will require up to a total of about 436k of kernel - * memory on i386 (390k for compression, the rest for decompression), as the - * current zlib kernel code uses a worst case pre-allocation system by default. - * This needs to be fixed so that the amount of memory required is properly - * related to the winbits and memlevel parameters. - * - * The default winbits of 11 should suit most packets, and it may be something - * to configure on a per-tfm basis in the future. - * - * Currently, compression history is not maintained between tfm calls, as - * it is not needed for IPCOMP and keeps the code simpler. It can be - * implemented if someone wants it. + * Copyright (c) 2023 Google, LLC. <ardb@kernel.org> + * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au> */ +#include <crypto/internal/acompress.h> +#include <crypto/scatterwalk.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/crypto.h> +#include <linux/mutex.h> +#include <linux/percpu.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/zlib.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <linux/mm.h> -#include <linux/net.h> -#include <crypto/internal/scompress.h> #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION #define DEFLATE_DEF_WINBITS 11 #define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL -struct deflate_ctx { - struct z_stream_s comp_stream; - struct z_stream_s decomp_stream; +struct deflate_stream { + struct z_stream_s stream; + u8 workspace[]; }; -static int deflate_comp_init(struct deflate_ctx *ctx) -{ - int ret = 0; - struct z_stream_s *stream = &ctx->comp_stream; - - stream->workspace = vzalloc(zlib_deflate_workspacesize( - -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL)); - if (!stream->workspace) { - ret = -ENOMEM; - goto out; - } - ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, - -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, - Z_DEFAULT_STRATEGY); - if (ret != Z_OK) { - ret = -EINVAL; - goto out_free; - } -out: - return ret; -out_free: - vfree(stream->workspace); - goto out; -} +static DEFINE_MUTEX(deflate_stream_lock); -static int deflate_decomp_init(struct deflate_ctx *ctx) +static void *deflate_alloc_stream(void) { - int ret = 0; - struct z_stream_s *stream = &ctx->decomp_stream; + size_t size = max(zlib_inflate_workspacesize(), + zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, + DEFLATE_DEF_MEMLEVEL)); + struct deflate_stream *ctx; - stream->workspace = vzalloc(zlib_inflate_workspacesize()); - if (!stream->workspace) { - ret = -ENOMEM; - goto out; - } - ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); - if (ret != Z_OK) { - ret = -EINVAL; - goto out_free; - } -out: - return ret; -out_free: - vfree(stream->workspace); - goto out; -} + ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); -static void deflate_comp_exit(struct deflate_ctx *ctx) -{ - zlib_deflateEnd(&ctx->comp_stream); - vfree(ctx->comp_stream.workspace); -} + ctx->stream.workspace = ctx->workspace; -static void deflate_decomp_exit(struct deflate_ctx *ctx) -{ - zlib_inflateEnd(&ctx->decomp_stream); - vfree(ctx->decomp_stream.workspace); + return ctx; } -static int __deflate_init(void *ctx) +static struct crypto_acomp_streams deflate_streams = { + .alloc_ctx = deflate_alloc_stream, + .cfree_ctx = kvfree, +}; + +static int 
deflate_compress_one(struct acomp_req *req, + struct deflate_stream *ds) { + struct z_stream_s *stream = &ds->stream; + struct acomp_walk walk; int ret; - ret = deflate_comp_init(ctx); + ret = acomp_walk_virt(&walk, req, true); if (ret) - goto out; - ret = deflate_decomp_init(ctx); - if (ret) - deflate_comp_exit(ctx); -out: - return ret; -} + return ret; -static void *deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - struct deflate_ctx *ctx; - int ret; + do { + unsigned int dcur; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return ERR_PTR(-ENOMEM); + dcur = acomp_walk_next_dst(&walk); + if (!dcur) + return -ENOSPC; - ret = __deflate_init(ctx); - if (ret) { - kfree(ctx); - return ERR_PTR(ret); - } + stream->avail_out = dcur; + stream->next_out = walk.dst.virt.addr; - return ctx; -} + do { + int flush = Z_FINISH; + unsigned int scur; -static int deflate_init(struct crypto_tfm *tfm) -{ - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + stream->avail_in = 0; + stream->next_in = NULL; - return __deflate_init(ctx); -} + scur = acomp_walk_next_src(&walk); + if (scur) { + if (acomp_walk_more_src(&walk, scur)) + flush = Z_NO_FLUSH; + stream->avail_in = scur; + stream->next_in = walk.src.virt.addr; + } -static void __deflate_exit(void *ctx) -{ - deflate_comp_exit(ctx); - deflate_decomp_exit(ctx); -} + ret = zlib_deflate(stream, flush); -static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) -{ - __deflate_exit(ctx); - kfree_sensitive(ctx); -} + if (scur) { + scur -= stream->avail_in; + acomp_walk_done_src(&walk, scur); + } + } while (ret == Z_OK && stream->avail_out); -static void deflate_exit(struct crypto_tfm *tfm) -{ - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + acomp_walk_done_dst(&walk, dcur); + } while (ret == Z_OK); - __deflate_exit(ctx); + if (ret != Z_STREAM_END) + return -EINVAL; + + req->dlen = stream->total_out; + return 0; } -static int __deflate_compress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) +static int deflate_compress(struct acomp_req *req) { - int ret = 0; - struct deflate_ctx *dctx = ctx; - struct z_stream_s *stream = &dctx->comp_stream; + struct crypto_acomp_stream *s; + struct deflate_stream *ds; + int err; + + s = crypto_acomp_lock_stream_bh(&deflate_streams); + ds = s->ctx; - ret = zlib_deflateReset(stream); - if (ret != Z_OK) { - ret = -EINVAL; + err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, + -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); + if (err != Z_OK) { + err = -EINVAL; goto out; } - stream->next_in = (u8 *)src; - stream->avail_in = slen; - stream->next_out = (u8 *)dst; - stream->avail_out = *dlen; + err = deflate_compress_one(req, ds); - ret = zlib_deflate(stream, Z_FINISH); - if (ret != Z_STREAM_END) { - ret = -EINVAL; - goto out; - } - ret = 0; - *dlen = stream->total_out; out: - return ret; + crypto_acomp_unlock_stream_bh(s); + + return err; } -static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int deflate_decompress_one(struct acomp_req *req, + struct deflate_stream *ds) { - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + struct z_stream_s *stream = &ds->stream; + bool out_of_space = false; + struct acomp_walk walk; + int ret; - return __deflate_compress(src, slen, dst, dlen, dctx); -} + ret = acomp_walk_virt(&walk, req, true); + if (ret) + return ret; -static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen, - void *ctx) 
-{ - return __deflate_compress(src, slen, dst, dlen, ctx); + do { + unsigned int scur; + + stream->avail_in = 0; + stream->next_in = NULL; + + scur = acomp_walk_next_src(&walk); + if (scur) { + stream->avail_in = scur; + stream->next_in = walk.src.virt.addr; + } + + do { + unsigned int dcur; + + dcur = acomp_walk_next_dst(&walk); + if (!dcur) { + out_of_space = true; + break; + } + + stream->avail_out = dcur; + stream->next_out = walk.dst.virt.addr; + + ret = zlib_inflate(stream, Z_NO_FLUSH); + + dcur -= stream->avail_out; + acomp_walk_done_dst(&walk, dcur); + } while (ret == Z_OK && stream->avail_in); + + if (scur) + acomp_walk_done_src(&walk, scur); + + if (out_of_space) + return -ENOSPC; + } while (ret == Z_OK); + + if (ret != Z_STREAM_END) + return -EINVAL; + + req->dlen = stream->total_out; + return 0; } -static int __deflate_decompress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) +static int deflate_decompress(struct acomp_req *req) { + struct crypto_acomp_stream *s; + struct deflate_stream *ds; + int err; - int ret = 0; - struct deflate_ctx *dctx = ctx; - struct z_stream_s *stream = &dctx->decomp_stream; + s = crypto_acomp_lock_stream_bh(&deflate_streams); + ds = s->ctx; - ret = zlib_inflateReset(stream); - if (ret != Z_OK) { - ret = -EINVAL; + err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS); + if (err != Z_OK) { + err = -EINVAL; goto out; } - stream->next_in = (u8 *)src; - stream->avail_in = slen; - stream->next_out = (u8 *)dst; - stream->avail_out = *dlen; - - ret = zlib_inflate(stream, Z_SYNC_FLUSH); - /* - * Work around a bug in zlib, which sometimes wants to taste an extra - * byte when being used in the (undocumented) raw deflate mode. - * (From USAGI). - */ - if (ret == Z_OK && !stream->avail_in && stream->avail_out) { - u8 zerostuff = 0; - stream->next_in = &zerostuff; - stream->avail_in = 1; - ret = zlib_inflate(stream, Z_FINISH); - } - if (ret != Z_STREAM_END) { - ret = -EINVAL; - goto out; - } - ret = 0; - *dlen = stream->total_out; + err = deflate_decompress_one(req, ds); + out: - return ret; + crypto_acomp_unlock_stream_bh(s); + + return err; } -static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int deflate_init(struct crypto_acomp *tfm) { - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + int ret; - return __deflate_decompress(src, slen, dst, dlen, dctx); -} + mutex_lock(&deflate_stream_lock); + ret = crypto_acomp_alloc_streams(&deflate_streams); + mutex_unlock(&deflate_stream_lock); -static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen, - void *ctx) -{ - return __deflate_decompress(src, slen, dst, dlen, ctx); + return ret; } -static struct crypto_alg alg = { - .cra_name = "deflate", - .cra_driver_name = "deflate-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct deflate_ctx), - .cra_module = THIS_MODULE, - .cra_init = deflate_init, - .cra_exit = deflate_exit, - .cra_u = { .compress = { - .coa_compress = deflate_compress, - .coa_decompress = deflate_decompress } } -}; - -static struct scomp_alg scomp = { - .alloc_ctx = deflate_alloc_ctx, - .free_ctx = deflate_free_ctx, - .compress = deflate_scompress, - .decompress = deflate_sdecompress, - .base = { - .cra_name = "deflate", - .cra_driver_name = "deflate-scomp", - .cra_module = THIS_MODULE, - } +static struct acomp_alg acomp = { + .compress = deflate_compress, + .decompress = deflate_decompress, + .init = 
deflate_init, + .base.cra_name = "deflate", + .base.cra_driver_name = "deflate-generic", + .base.cra_flags = CRYPTO_ALG_REQ_VIRT, + .base.cra_module = THIS_MODULE, }; static int __init deflate_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_acomp(&acomp); } static void __exit deflate_mod_fini(void) { - crypto_unregister_alg(&alg); - crypto_unregister_scomp(&scomp); + crypto_unregister_acomp(&acomp); + crypto_acomp_free_streams(&deflate_streams); } -subsys_initcall(deflate_mod_init); +module_init(deflate_mod_init); module_exit(deflate_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); +MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); +MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>"); MODULE_ALIAS_CRYPTO("deflate"); +MODULE_ALIAS_CRYPTO("deflate-generic"); diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 1274e18d3eb9..fce341400914 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void) crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs)); } -subsys_initcall(des_generic_mod_init); +module_init(des_generic_mod_init); module_exit(des_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/dh.c b/crypto/dh.c index 68d11d66c0b5..8250eeeebd0f 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -145,9 +145,9 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) * ->p is odd, so no need to explicitly subtract one * from it before shifting to the right. */ - mpi_rshift(q, ctx->p, 1); + ret = mpi_rshift(q, ctx->p, 1) ?: + mpi_powm(val, y, q, ctx->p); - ret = mpi_powm(val, y, q, ctx->p); mpi_free(q); if (ret) { mpi_free(val); @@ -920,7 +920,7 @@ static void __exit dh_exit(void) crypto_unregister_kpp(&dh); } -subsys_initcall(dh_init); +module_init(dh_init); module_exit(dh_exit); MODULE_ALIAS_CRYPTO("dh"); MODULE_LICENSE("GPL"); diff --git a/crypto/drbg.c b/crypto/drbg.c index 3addce90930c..dbe4c8bb5ceb 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -101,6 +101,7 @@ #include <crypto/internal/cipher.h> #include <linux/kernel.h> #include <linux/jiffies.h> +#include <linux/string_choices.h> /*************************************************************** * Backend cipher definitions available to DRBG @@ -1412,7 +1413,7 @@ static int drbg_generate(struct drbg_state *drbg, if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", - drbg->pr ? "true" : "false", + str_true_false(drbg->pr), (drbg->seeded == DRBG_SEED_STATE_FULL ? "seeded" : "unseeded")); /* 9.3.1 steps 7.1 through 7.3 */ @@ -1562,7 +1563,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, bool reseed = true; pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " - "%s\n", coreref, pr ? 
"enabled" : "disabled"); + "%s\n", coreref, str_enabled_disabled(pr)); mutex_lock(&drbg->drbg_mutex); /* 9.1 step 1 is implicit with the selected DRBG type */ @@ -2131,7 +2132,7 @@ static void __exit drbg_exit(void) crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } -subsys_initcall(drbg_init); +module_init(drbg_init); module_exit(drbg_exit); #ifndef CRYPTO_DRBG_HASH_STRING #define CRYPTO_DRBG_HASH_STRING "" @@ -2150,4 +2151,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) " CRYPTO_DRBG_HMAC_STRING CRYPTO_DRBG_CTR_STRING); MODULE_ALIAS_CRYPTO("stdrng"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/ecb.c b/crypto/ecb.c index e3a67789050e..cd1b20456dad 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -219,10 +219,10 @@ static void __exit crypto_ecb_module_exit(void) crypto_unregister_template(&crypto_ecb_tmpl); } -subsys_initcall(crypto_ecb_module_init); +module_init(crypto_ecb_module_init); module_exit(crypto_ecb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ECB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ecb"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/ecc.c b/crypto/ecc.c index f53fb4d6af99..6cf9a945fc6c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -33,7 +33,7 @@ #include <crypto/ecdh.h> #include <crypto/rng.h> #include <crypto/internal/ecc.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/ratelimit.h> #include "ecc_curve_defs.h" @@ -60,12 +60,36 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id) return &nist_p256; case ECC_CURVE_NIST_P384: return &nist_p384; + case ECC_CURVE_NIST_P521: + return &nist_p521; default: return NULL; } } EXPORT_SYMBOL(ecc_get_curve); +void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, + u64 *out, unsigned int ndigits) +{ + int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64)); + unsigned int o = nbytes & 7; + __be64 msd = 0; + + /* diff > 0: not enough input bytes: set most significant digits to 0 */ + if (diff > 0) { + ndigits -= diff; + memset(&out[ndigits], 0, diff * sizeof(u64)); + } + + if (o) { + memcpy((u8 *)&msd + sizeof(msd) - o, in, o); + out[--ndigits] = be64_to_cpu(msd); + in += o; + } + ecc_swap_digits(in, out, ndigits); +} +EXPORT_SYMBOL(ecc_digits_from_bytes); + static u64 *ecc_alloc_digits_space(unsigned int ndigits) { size_t len = ndigits * sizeof(u64); @@ -689,7 +713,7 @@ static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod, static void vli_mmod_fast_192(u64 *result, const u64 *product, const u64 *curve_prime, u64 *tmp) { - const unsigned int ndigits = 3; + const unsigned int ndigits = ECC_CURVE_NIST_P192_DIGITS; int carry; vli_set(result, product, ndigits); @@ -717,7 +741,7 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product, const u64 *curve_prime, u64 *tmp) { int carry; - const unsigned int ndigits = 4; + const unsigned int ndigits = ECC_CURVE_NIST_P256_DIGITS; /* t */ vli_set(result, product, ndigits); @@ -800,7 +824,7 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product, const u64 *curve_prime, u64 *tmp) { int carry; - const unsigned int ndigits = 6; + const unsigned int ndigits = ECC_CURVE_NIST_P384_DIGITS; /* t */ vli_set(result, product, ndigits); @@ -902,6 +926,28 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product, #undef AND64H #undef AND64L +/* + * Computes result = product % curve_prime + * from "Recommendations for Discrete Logarithm-Based 
Cryptography: + * Elliptic Curve Domain Parameters" section G.1.4 + */ +static void vli_mmod_fast_521(u64 *result, const u64 *product, + const u64 *curve_prime, u64 *tmp) +{ + const unsigned int ndigits = ECC_CURVE_NIST_P521_DIGITS; + size_t i; + + /* Initialize result with lowest 521 bits from product */ + vli_set(result, product, ndigits); + result[8] &= 0x1ff; + + for (i = 0; i < ndigits; i++) + tmp[i] = (product[8 + i] >> 9) | (product[9 + i] << 55); + tmp[8] &= 0x1ff; + + vli_mod_add(result, result, tmp, curve_prime, ndigits); +} + /* Computes result = product % curve_prime for different curve_primes. * * Note that curve_primes are distinguished just by heuristic check and @@ -932,15 +978,18 @@ static bool vli_mmod_fast(u64 *result, u64 *product, } switch (ndigits) { - case 3: + case ECC_CURVE_NIST_P192_DIGITS: vli_mmod_fast_192(result, product, curve_prime, tmp); break; - case 4: + case ECC_CURVE_NIST_P256_DIGITS: vli_mmod_fast_256(result, product, curve_prime, tmp); break; - case 6: + case ECC_CURVE_NIST_P384_DIGITS: vli_mmod_fast_384(result, product, curve_prime, tmp); break; + case ECC_CURVE_NIST_P521_DIGITS: + vli_mmod_fast_521(result, product, curve_prime, tmp); + break; default: pr_err_ratelimited("ecc: unsupported digits size!\n"); return false; @@ -1295,7 +1344,10 @@ static void ecc_point_mult(struct ecc_point *result, carry = vli_add(sk[0], scalar, curve->n, ndigits); vli_add(sk[1], sk[0], curve->n, ndigits); scalar = sk[!carry]; - num_bits = sizeof(u64) * ndigits * 8 + 1; + if (curve->nbits == 521) /* NIST P521 */ + num_bits = curve->nbits + 2; + else + num_bits = sizeof(u64) * ndigits * 8 + 1; vli_set(rx[1], point->x, ndigits); vli_set(ry[1], point->y, ndigits); @@ -1416,6 +1468,12 @@ void ecc_point_mult_shamir(const struct ecc_point *result, } EXPORT_SYMBOL(ecc_point_mult_shamir); +/* + * This function performs checks equivalent to Appendix A.4.2 of FIPS 186-5. + * Whereas A.4.2 results in an integer in the interval [1, n-1], this function + * ensures that the integer is in the range of [2, n-3]. We are slightly + * stricter because of the currently used scalar multiplication algorithm. + */ static int __ecc_is_key_valid(const struct ecc_curve *curve, const u64 *private_key, unsigned int ndigits) { @@ -1455,31 +1513,29 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, EXPORT_SYMBOL(ecc_is_key_valid); /* - * ECC private keys are generated using the method of extra random bits, - * equivalent to that described in FIPS 186-4, Appendix B.4.1. - * - * d = (c mod(n–1)) + 1 where c is a string of random bits, 64 bits longer - * than requested - * 0 <= c mod(n-1) <= n-2 and implies that - * 1 <= d <= n-1 + * ECC private keys are generated using the method of rejection sampling, + * equivalent to that described in FIPS 186-5, Appendix A.2.2. * * This method generates a private key uniformly distributed in the range - * [1, n-1]. + * [2, n-3]. */ -int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) +int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, + u64 *private_key) { const struct ecc_curve *curve = ecc_get_curve(curve_id); - u64 priv[ECC_MAX_DIGITS]; unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; unsigned int nbits = vli_num_bits(curve->n, ndigits); int err; - /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */ - if (nbits < 160 || ndigits > ARRAY_SIZE(priv)) + /* + * Step 1 & 2: check that N is included in Table 1 of FIPS 186-5, + * section 6.1.1. 
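+ * Table 1 of FIPS 186-5, section 6.1.1 lists no approved curve with an
+ * order below 224 bits, so e.g. P-192 (nbits == 192) is rejected for
+ * key generation by the check below.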
+ */ + if (nbits < 224) return -EINVAL; /* - * FIPS 186-4 recommends that the private key should be obtained from a + * FIPS 186-5 recommends that the private key should be obtained from a * RBG with a security strength equal to or greater than the security * strength associated with N. * @@ -1492,17 +1548,17 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) if (crypto_get_default_rng()) return -EFAULT; - err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); + /* Step 3: obtain N returned_bits from the DRBG. */ + err = crypto_rng_get_bytes(crypto_default_rng, + (u8 *)private_key, nbytes); crypto_put_default_rng(); if (err) return err; - /* Make sure the private key is in the valid range. */ - if (__ecc_is_key_valid(curve, priv, ndigits)) + /* Step 4: make sure the private key is in the valid range. */ + if (__ecc_is_key_valid(curve, private_key, ndigits)) return -EINVAL; - ecc_swap_digits(priv, privkey, ndigits); - return 0; } EXPORT_SYMBOL(ecc_gen_privkey); @@ -1512,23 +1568,20 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, { int ret = 0; struct ecc_point *pk; - u64 priv[ECC_MAX_DIGITS]; const struct ecc_curve *curve = ecc_get_curve(curve_id); - if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) { + if (!private_key) { ret = -EINVAL; goto out; } - ecc_swap_digits(private_key, priv, ndigits); - pk = ecc_alloc_point(ndigits); if (!pk) { ret = -ENOMEM; goto out; } - ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); + ecc_point_mult(pk, &curve->g, private_key, NULL, curve, ndigits); /* SP800-56A rev 3 5.6.2.1.3 key check */ if (ecc_is_pubkey_valid_full(curve, pk)) { @@ -1612,13 +1665,11 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, { int ret = 0; struct ecc_point *product, *pk; - u64 priv[ECC_MAX_DIGITS]; u64 rand_z[ECC_MAX_DIGITS]; unsigned int nbytes; const struct ecc_curve *curve = ecc_get_curve(curve_id); - if (!private_key || !public_key || !curve || - ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) { + if (!private_key || !public_key || ndigits > ARRAY_SIZE(rand_z)) { ret = -EINVAL; goto out; } @@ -1639,15 +1690,13 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, if (ret) goto err_alloc_product; - ecc_swap_digits(private_key, priv, ndigits); - product = ecc_alloc_point(ndigits); if (!product) { ret = -ENOMEM; goto err_alloc_product; } - ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); + ecc_point_mult(product, pk, private_key, rand_z, curve, ndigits); if (ecc_point_is_zero(product)) { ret = -EFAULT; @@ -1657,7 +1706,6 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_swap_digits(product->x, secret, ndigits); err_validity: - memzero_explicit(priv, sizeof(priv)); memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: @@ -1667,4 +1715,5 @@ out: } EXPORT_SYMBOL(crypto_ecdh_shared_secret); +MODULE_DESCRIPTION("core elliptic curve module"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h index 9719934c9428..0ecade7d02f5 100644 --- a/crypto/ecc_curve_defs.h +++ b/crypto/ecc_curve_defs.h @@ -17,6 +17,7 @@ static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull, 0x64210519E59C80E7ull }; static struct ecc_curve nist_p192 = { .name = "nist_192", + .nbits = 192, .g = { .x = nist_p192_g_x, .y = nist_p192_g_y, @@ -43,6 +44,7 @@ static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull, 
0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull }; static struct ecc_curve nist_p256 = { .name = "nist_256", + .nbits = 256, .g = { .x = nist_p256_g_x, .y = nist_p256_g_y, @@ -75,6 +77,7 @@ static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull, 0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull }; static struct ecc_curve nist_p384 = { .name = "nist_384", + .nbits = 384, .g = { .x = nist_p384_g_x, .y = nist_p384_g_y, @@ -86,6 +89,51 @@ static struct ecc_curve nist_p384 = { .b = nist_p384_b }; +/* NIST P-521 */ +static u64 nist_p521_g_x[] = { 0xf97e7e31c2e5bd66ull, 0x3348b3c1856a429bull, + 0xfe1dc127a2ffa8deull, 0xa14b5e77efe75928ull, + 0xf828af606b4d3dbaull, 0x9c648139053fb521ull, + 0x9e3ecb662395b442ull, 0x858e06b70404e9cdull, + 0xc6ull }; +static u64 nist_p521_g_y[] = { 0x88be94769fd16650ull, 0x353c7086a272c240ull, + 0xc550b9013fad0761ull, 0x97ee72995ef42640ull, + 0x17afbd17273e662cull, 0x98f54449579b4468ull, + 0x5c8a5fb42c7d1bd9ull, 0x39296a789a3bc004ull, + 0x118ull }; +static u64 nist_p521_p[] = { 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0x1ffull }; +static u64 nist_p521_n[] = { 0xbb6fb71e91386409ull, 0x3bb5c9b8899c47aeull, + 0x7fcc0148f709a5d0ull, 0x51868783bf2f966bull, + 0xfffffffffffffffaull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0x1ffull }; +static u64 nist_p521_a[] = { 0xfffffffffffffffcull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0xffffffffffffffffull, 0xffffffffffffffffull, + 0x1ffull }; +static u64 nist_p521_b[] = { 0xef451fd46b503f00ull, 0x3573df883d2c34f1ull, + 0x1652c0bd3bb1bf07ull, 0x56193951ec7e937bull, + 0xb8b489918ef109e1ull, 0xa2da725b99b315f3ull, + 0x929a21a0b68540eeull, 0x953eb9618e1c9a1full, + 0x051ull }; +static struct ecc_curve nist_p521 = { + .name = "nist_521", + .nbits = 521, + .g = { + .x = nist_p521_g_x, + .y = nist_p521_g_y, + .ndigits = 9, + }, + .p = nist_p521_p, + .n = nist_p521_n, + .a = nist_p521_a, + .b = nist_p521_b +}; + /* curve25519 */ static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; @@ -95,6 +143,7 @@ static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 }; static const struct ecc_curve ecc_25519 = { .name = "curve25519", + .nbits = 255, .g = { .x = curve25519_g_x, .ndigits = 4, diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 80afee3234fb..9f0b93b3166d 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -28,23 +28,28 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, { struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); struct ecdh params; + int ret = 0; if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0 || params.key_size > sizeof(u64) * ctx->ndigits) return -EINVAL; + memset(ctx->private_key, 0, sizeof(ctx->private_key)); + if (!params.key || !params.key_size) return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); - memcpy(ctx->private_key, params.key, params.key_size); + ecc_digits_from_bytes(params.key, params.key_size, + ctx->private_key, ctx->ndigits); if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, ctx->private_key, params.key_size) < 0) { memzero_explicit(ctx->private_key, params.key_size); - return -EINVAL; + ret = -EINVAL; } - return 0; + + return ret; } static int ecdh_compute_value(struct kpp_request *req) @@ -235,7 +240,7 @@ 
static void __exit ecdh_exit(void) crypto_unregister_kpp(&ecdh_nist_p384); } -subsys_initcall(ecdh_init); +module_init(ecdh_init); module_exit(ecdh_exit); MODULE_ALIAS_CRYPTO("ecdh"); MODULE_LICENSE("GPL"); diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c new file mode 100644 index 000000000000..e0c55c64711c --- /dev/null +++ b/crypto/ecdsa-p1363.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ECDSA P1363 signature encoding + * + * Copyright (c) 2024 Intel Corporation + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <crypto/algapi.h> +#include <crypto/sig.h> +#include <crypto/internal/ecc.h> +#include <crypto/internal/sig.h> + +struct ecdsa_p1363_ctx { + struct crypto_sig *child; +}; + +static int ecdsa_p1363_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); + unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64)); + struct ecdsa_raw_sig sig; + + if (slen != 2 * keylen) + return -EINVAL; + + ecc_digits_from_bytes(src, keylen, sig.r, ndigits); + ecc_digits_from_bytes(src + keylen, keylen, sig.s, ndigits); + + return crypto_sig_verify(ctx->child, &sig, sizeof(sig), digest, dlen); +} + +static unsigned int ecdsa_p1363_key_size(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_keysize(ctx->child); +} + +static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); +} + +static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_digestsize(ctx->child); +} + +static int ecdsa_p1363_set_pub_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_set_pubkey(ctx->child, key, keylen); +} + +static int ecdsa_p1363_init_tfm(struct crypto_sig *tfm) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + struct crypto_sig *child_tfm; + + child_tfm = crypto_spawn_sig(spawn); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void ecdsa_p1363_exit_tfm(struct crypto_sig *tfm) +{ + struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); + + crypto_free_sig(ctx->child); +} + +static void ecdsa_p1363_free(struct sig_instance *inst) +{ + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + + crypto_drop_sig(spawn); + kfree(inst); +} + +static int ecdsa_p1363_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_sig_spawn *spawn; + struct sig_instance *inst; + struct sig_alg *ecdsa_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = sig_instance_ctx(inst); + + err = crypto_grab_sig(spawn, sig_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + + ecdsa_alg = crypto_spawn_sig_alg(spawn); + + err = -EINVAL; + if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0) + goto err_free_inst; + 
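+	/* Only "ecdsa*" algorithms may be wrapped; crypto_inst_setname()
+	 * below then derives instance names such as "p1363(ecdsa-nist-p256)".
+	 */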
+ err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name, + &ecdsa_alg->base); + if (err) + goto err_free_inst; + + inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_p1363_ctx); + + inst->alg.init = ecdsa_p1363_init_tfm; + inst->alg.exit = ecdsa_p1363_exit_tfm; + + inst->alg.verify = ecdsa_p1363_verify; + inst->alg.key_size = ecdsa_p1363_key_size; + inst->alg.max_size = ecdsa_p1363_max_size; + inst->alg.digest_size = ecdsa_p1363_digest_size; + inst->alg.set_pub_key = ecdsa_p1363_set_pub_key; + + inst->free = ecdsa_p1363_free; + + err = sig_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ecdsa_p1363_free(inst); + } + return err; +} + +struct crypto_template ecdsa_p1363_tmpl = { + .name = "p1363", + .create = ecdsa_p1363_create, + .module = THIS_MODULE, +}; + +MODULE_ALIAS_CRYPTO("p1363"); diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c new file mode 100644 index 000000000000..ee71594d10a0 --- /dev/null +++ b/crypto/ecdsa-x962.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ECDSA X9.62 signature encoding + * + * Copyright (c) 2021 IBM Corporation + * Copyright (c) 2024 Intel Corporation + */ + +#include <linux/asn1_decoder.h> +#include <linux/err.h> +#include <linux/module.h> +#include <crypto/algapi.h> +#include <crypto/sig.h> +#include <crypto/internal/ecc.h> +#include <crypto/internal/sig.h> + +#include "ecdsasignature.asn1.h" + +struct ecdsa_x962_ctx { + struct crypto_sig *child; +}; + +struct ecdsa_x962_signature_ctx { + struct ecdsa_raw_sig sig; + unsigned int ndigits; +}; + +/* Get the r and s components of a signature from the X.509 certificate. */ +static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen, + unsigned int ndigits) +{ + size_t bufsize = ndigits * sizeof(u64); + const char *d = value; + + if (!value || !vlen || vlen > bufsize + 1) + return -EINVAL; + + /* + * vlen may be 1 byte larger than bufsize due to a leading zero byte + * (necessary if the most significant bit of the integer is set). 
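+ * For example, a P-256 'r' with its most significant bit set is
+ * BER-encoded as 33 bytes, a 0x00 pad plus 32 value bytes, so
+ * vlen == bufsize + 1 is accepted and the pad is skipped below.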
+ */ + if (vlen > bufsize) { + /* skip over leading zeros that make 'value' a positive int */ + if (*d == 0) { + vlen -= 1; + d++; + } else { + return -EINVAL; + } + } + + ecc_digits_from_bytes(d, vlen, dest, ndigits); + + return 0; +} + +int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecdsa_x962_signature_ctx *sig_ctx = context; + + return ecdsa_get_signature_rs(sig_ctx->sig.r, hdrlen, tag, value, vlen, + sig_ctx->ndigits); +} + +int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecdsa_x962_signature_ctx *sig_ctx = context; + + return ecdsa_get_signature_rs(sig_ctx->sig.s, hdrlen, tag, value, vlen, + sig_ctx->ndigits); +} + +static int ecdsa_x962_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + struct ecdsa_x962_signature_ctx sig_ctx; + int err; + + sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + sizeof(u64) * BITS_PER_BYTE); + + err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen); + if (err < 0) + return err; + + return crypto_sig_verify(ctx->child, &sig_ctx.sig, sizeof(sig_ctx.sig), + digest, dlen); +} + +static unsigned int ecdsa_x962_key_size(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_keysize(ctx->child); +} + +static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + struct sig_alg *alg = crypto_sig_alg(ctx->child); + int slen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); + + /* + * Verify takes ECDSA-Sig-Value (described in RFC 5480) as input, + * which is actually 2 'key_size'-bit integers encoded in ASN.1. + * Account for the ASN.1 encoding overhead here. + * + * NIST P192/256/384 may prepend a '0' to a coordinate to indicate + * a positive integer. NIST P521 never needs it. + */ + if (strcmp(alg->base.cra_name, "ecdsa-nist-p521") != 0) + slen += 1; + + /* Length of encoding the x & y coordinates */ + slen = 2 * (slen + 2); + + /* + * If coordinate encoding takes at least 128 bytes then an + * additional byte for length encoding is needed. 
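+ *
+ * For example, NIST P-256: slen = 2 * (32 + 1 + 2) = 70, giving a
+ * total of 1 + 0 + 1 + 70 = 72 bytes; NIST P-521: slen = 2 * (66 + 2)
+ * = 136 >= 128, which needs the extra length byte:
+ * 1 + 1 + 1 + 136 = 139 bytes.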
+ */ + return 1 + (slen >= 128) + 1 + slen; +} + +static unsigned int ecdsa_x962_digest_size(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_digestsize(ctx->child); +} + +static int ecdsa_x962_set_pub_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + return crypto_sig_set_pubkey(ctx->child, key, keylen); +} + +static int ecdsa_x962_init_tfm(struct crypto_sig *tfm) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + struct crypto_sig *child_tfm; + + child_tfm = crypto_spawn_sig(spawn); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void ecdsa_x962_exit_tfm(struct crypto_sig *tfm) +{ + struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); + + crypto_free_sig(ctx->child); +} + +static void ecdsa_x962_free(struct sig_instance *inst) +{ + struct crypto_sig_spawn *spawn = sig_instance_ctx(inst); + + crypto_drop_sig(spawn); + kfree(inst); +} + +static int ecdsa_x962_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_sig_spawn *spawn; + struct sig_instance *inst; + struct sig_alg *ecdsa_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = sig_instance_ctx(inst); + + err = crypto_grab_sig(spawn, sig_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + + ecdsa_alg = crypto_spawn_sig_alg(spawn); + + err = -EINVAL; + if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0) + goto err_free_inst; + + err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name, + &ecdsa_alg->base); + if (err) + goto err_free_inst; + + inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_x962_ctx); + + inst->alg.init = ecdsa_x962_init_tfm; + inst->alg.exit = ecdsa_x962_exit_tfm; + + inst->alg.verify = ecdsa_x962_verify; + inst->alg.key_size = ecdsa_x962_key_size; + inst->alg.max_size = ecdsa_x962_max_size; + inst->alg.digest_size = ecdsa_x962_digest_size; + inst->alg.set_pub_key = ecdsa_x962_set_pub_key; + + inst->free = ecdsa_x962_free; + + err = sig_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ecdsa_x962_free(inst); + } + return err; +} + +struct crypto_template ecdsa_x962_tmpl = { + .name = "x962", + .create = ecdsa_x962_create, + .module = THIS_MODULE, +}; + +MODULE_ALIAS_CRYPTO("x962"); diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c index fbd76498aba8..ce8e4364842f 100644 --- a/crypto/ecdsa.c +++ b/crypto/ecdsa.c @@ -4,14 +4,11 @@ */ #include <linux/module.h> -#include <crypto/internal/akcipher.h> #include <crypto/internal/ecc.h> -#include <crypto/akcipher.h> +#include <crypto/internal/sig.h> #include <crypto/ecdh.h> -#include <linux/asn1_decoder.h> -#include <linux/scatterlist.h> - -#include "ecdsasignature.asn1.h" +#include <crypto/sha2.h> +#include <crypto/sig.h> struct ecc_ctx { unsigned int curve_id; @@ -23,74 +20,6 @@ struct ecc_ctx { struct ecc_point pub_key; }; -struct ecdsa_signature_ctx { - const struct ecc_curve *curve; - u64 r[ECC_MAX_DIGITS]; - u64 s[ECC_MAX_DIGITS]; -}; - -/* - * Get the r and s components of a signature from the X509 certificate. 
- */ -static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen, unsigned int ndigits) -{ - size_t keylen = ndigits * sizeof(u64); - ssize_t diff = vlen - keylen; - const char *d = value; - u8 rs[ECC_MAX_BYTES]; - - if (!value || !vlen) - return -EINVAL; - - /* diff = 0: 'value' has exacly the right size - * diff > 0: 'value' has too many bytes; one leading zero is allowed that - * makes the value a positive integer; error on more - * diff < 0: 'value' is missing leading zeros, which we add - */ - if (diff > 0) { - /* skip over leading zeros that make 'value' a positive int */ - if (*d == 0) { - vlen -= 1; - diff--; - d++; - } - if (diff) - return -EINVAL; - } - if (-diff >= keylen) - return -EINVAL; - - if (diff) { - /* leading zeros not given in 'value' */ - memset(rs, 0, -diff); - } - - memcpy(&rs[-diff], d, vlen); - - ecc_swap_digits((u64 *)rs, dest, ndigits); - - return 0; -} - -int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct ecdsa_signature_ctx *sig = context; - - return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen, - sig->curve->g.ndigits); -} - -int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct ecdsa_signature_ctx *sig = context; - - return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen, - sig->curve->g.ndigits); -} - static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s) { const struct ecc_curve *curve = ctx->curve; @@ -122,7 +51,7 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con /* res.x = res.x mod n (if res.x > order) */ if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1)) - /* faster alternative for NIST p384, p256 & p192 */ + /* faster alternative for NIST p521, p384, p256 & p192 */ vli_sub(res.x, res.x, curve->n, ndigits); if (!vli_cmp(res.x, r, ndigits)) @@ -134,55 +63,27 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con /* * Verify an ECDSA signature. 
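
The single vli_sub() above stands in for a full modular reduction: res.x has already been reduced modulo the field prime p, and for the NIST curves the group order n satisfies p < 2n, so res.x < 2n and at most one subtraction of n is needed. The same idea on plain integers (illustrative only):

#include <assert.h>
#include <stdint.h>

/* Reduce x modulo n under the precondition x < 2n. */
static uint64_t reduce_once(uint64_t x, uint64_t n)
{
        return x >= n ? x - n : x;
}

int main(void)
{
        assert(reduce_once(13, 10) == 3);   /* one subtraction suffices */
        assert(reduce_once(7, 10) == 7);    /* already reduced */
        return 0;
}
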
*/ -static int ecdsa_verify(struct akcipher_request *req) +static int ecdsa_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) { - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); - size_t keylen = ctx->curve->g.ndigits * sizeof(u64); - struct ecdsa_signature_ctx sig_ctx = { - .curve = ctx->curve, - }; - u8 rawhash[ECC_MAX_BYTES]; + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); + size_t bufsize = ctx->curve->g.ndigits * sizeof(u64); + const struct ecdsa_raw_sig *sig = src; u64 hash[ECC_MAX_DIGITS]; - unsigned char *buffer; - ssize_t diff; - int ret; if (unlikely(!ctx->pub_key_set)) return -EINVAL; - buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, req->src_len + req->dst_len), - buffer, req->src_len + req->dst_len, 0); - - ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, - buffer, req->src_len); - if (ret < 0) - goto error; - - /* if the hash is shorter then we will add leading zeros to fit to ndigits */ - diff = keylen - req->dst_len; - if (diff >= 0) { - if (diff) - memset(rawhash, 0, diff); - memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len); - } else if (diff < 0) { - /* given hash is longer, we take the left-most bytes */ - memcpy(&rawhash, buffer + req->src_len, keylen); - } - - ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits); + if (slen != sizeof(*sig)) + return -EINVAL; - ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s); + if (bufsize > dlen) + bufsize = dlen; -error: - kfree(buffer); + ecc_digits_from_bytes(digest, bufsize, hash, ctx->curve->g.ndigits); - return ret; + return _ecdsa_verify(ctx, hash, sig->r, sig->s); } static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id) @@ -215,35 +116,39 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx) } /* - * Set the public key given the raw uncompressed key data from an X509 - * certificate. The key data contain the concatenated X and Y coordinates of - * the public key. + * Set the public ECC key as defined by RFC5480 section 2.2 "Subject Public + * Key". Only the uncompressed format is supported. 
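
The accepted wire format is the SEC1/RFC 5480 uncompressed point: a 0x04 marker followed by the X and Y coordinates at full curve width, big-endian; for P-256 that is 1 + 32 + 32 = 65 bytes. A userspace sketch of the shape check (the kernel code that follows additionally matches the coordinate width against the curve):

#include <stddef.h>
#include <stdint.h>

struct raw_point {
        const uint8_t *x, *y;   /* big-endian coordinates */
        size_t coord_len;
};

static int parse_uncompressed(const uint8_t *key, size_t keylen,
                              struct raw_point *p)
{
        /* need the 0x04 marker plus an even number of coordinate bytes */
        if (keylen < 1 || ((keylen - 1) & 1) || key[0] != 0x04)
                return -1;

        p->coord_len = (keylen - 1) / 2;
        p->x = key + 1;
        p->y = key + 1 + p->coord_len;
        return 0;
}
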
*/ -static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) +static int ecdsa_set_pub_key(struct crypto_sig *tfm, const void *key, + unsigned int keylen) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int digitlen, ndigits; const unsigned char *d = key; - const u64 *digits = (const u64 *)&d[1]; - unsigned int ndigits; int ret; ret = ecdsa_ecc_ctx_reset(ctx); if (ret < 0) return ret; - if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0) + if (keylen < 1 || ((keylen - 1) & 1) != 0) return -EINVAL; /* we only accept uncompressed format indicated by '4' */ if (d[0] != 4) return -EINVAL; keylen--; - ndigits = (keylen >> 1) / sizeof(u64); + digitlen = keylen >> 1; + + ndigits = DIV_ROUND_UP(digitlen, sizeof(u64)); if (ndigits != ctx->curve->g.ndigits) return -EINVAL; - ecc_swap_digits(digits, ctx->pub_key.x, ndigits); - ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits); + d++; + + ecc_digits_from_bytes(d, digitlen, ctx->pub_key.x, ndigits); + ecc_digits_from_bytes(&d[digitlen], digitlen, ctx->pub_key.y, ndigits); + ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); ctx->pub_key_set = ret == 0; @@ -251,31 +156,66 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig return ret; } -static void ecdsa_exit_tfm(struct crypto_akcipher *tfm) +static void ecdsa_exit_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); ecdsa_ecc_ctx_deinit(ctx); } -static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm) +static unsigned int ecdsa_key_size(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); - return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + return ctx->curve->nbits; +} + +static unsigned int ecdsa_digest_size(struct crypto_sig *tfm) +{ + /* + * ECDSA key sizes are much smaller than RSA, and thus could + * operate on (hashed) inputs that are larger than the key size. + * E.g. SHA384-hashed input used with secp256r1 based keys. + * Return the largest supported hash size (SHA512). 
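
A digest wider than the curve is truncated to its leftmost curve-width bytes, and a narrower one is zero-extended at the most significant end; both then become little-endian u64 digits. A userspace sketch of that conversion (assuming the documented behaviour of ecc_digits_from_bytes(); the helper name here is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void digits_from_bytes(const uint8_t *in, unsigned int nbytes,
                              uint64_t *out, unsigned int ndigits)
{
        uint8_t be[ndigits * 8];
        unsigned int i, j;

        /* right-align the big-endian value, zero-padding on the left */
        memset(be, 0, sizeof(be));
        memcpy(be + sizeof(be) - nbytes, in, nbytes);

        for (i = 0; i < ndigits; i++) {
                uint64_t d = 0;

                for (j = 0; j < 8; j++)
                        d = (d << 8) | be[(ndigits - 1 - i) * 8 + j];
                out[i] = d;     /* out[0] is the least significant digit */
        }
}

int main(void)
{
        uint8_t sha384[48] = {0};       /* stand-in digest */
        uint64_t hash[4];               /* P-256: 4 digits */
        unsigned int n = sizeof(sha384);

        if (n > sizeof(hash))           /* longer hash: keep leftmost bytes */
                n = sizeof(hash);
        digits_from_bytes(sha384, n, hash, 4);
        printf("%llx\n", (unsigned long long)hash[3]);
        return 0;
}
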
+ */ + return SHA512_DIGEST_SIZE; } -static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) +static int ecdsa_nist_p521_init_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P521); +} + +static struct sig_alg ecdsa_nist_p521 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, + .init = ecdsa_nist_p521_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p521", + .cra_driver_name = "ecdsa-nist-p521-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; + +static int ecdsa_nist_p384_init_tfm(struct crypto_sig *tfm) +{ + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384); } -static struct akcipher_alg ecdsa_nist_p384 = { +static struct sig_alg ecdsa_nist_p384 = { .verify = ecdsa_verify, .set_pub_key = ecdsa_set_pub_key, - .max_size = ecdsa_max_size, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, .init = ecdsa_nist_p384_init_tfm, .exit = ecdsa_exit_tfm, .base = { @@ -287,17 +227,18 @@ static struct akcipher_alg ecdsa_nist_p384 = { }, }; -static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm) +static int ecdsa_nist_p256_init_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256); } -static struct akcipher_alg ecdsa_nist_p256 = { +static struct sig_alg ecdsa_nist_p256 = { .verify = ecdsa_verify, .set_pub_key = ecdsa_set_pub_key, - .max_size = ecdsa_max_size, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, .init = ecdsa_nist_p256_init_tfm, .exit = ecdsa_exit_tfm, .base = { @@ -309,17 +250,18 @@ static struct akcipher_alg ecdsa_nist_p256 = { }, }; -static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm) +static int ecdsa_nist_p192_init_tfm(struct crypto_sig *tfm) { - struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecc_ctx *ctx = crypto_sig_ctx(tfm); return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192); } -static struct akcipher_alg ecdsa_nist_p192 = { +static struct sig_alg ecdsa_nist_p192 = { .verify = ecdsa_verify, .set_pub_key = ecdsa_set_pub_key, - .max_size = ecdsa_max_size, + .key_size = ecdsa_key_size, + .digest_size = ecdsa_digest_size, .init = ecdsa_nist_p192_init_tfm, .exit = ecdsa_exit_tfm, .base = { @@ -337,40 +279,69 @@ static int __init ecdsa_init(void) int ret; /* NIST p192 may not be available in FIPS mode */ - ret = crypto_register_akcipher(&ecdsa_nist_p192); + ret = crypto_register_sig(&ecdsa_nist_p192); ecdsa_nist_p192_registered = ret == 0; - ret = crypto_register_akcipher(&ecdsa_nist_p256); + ret = crypto_register_sig(&ecdsa_nist_p256); if (ret) goto nist_p256_error; - ret = crypto_register_akcipher(&ecdsa_nist_p384); + ret = crypto_register_sig(&ecdsa_nist_p384); if (ret) goto nist_p384_error; + ret = crypto_register_sig(&ecdsa_nist_p521); + if (ret) + goto nist_p521_error; + + ret = crypto_register_template(&ecdsa_x962_tmpl); + if (ret) + goto x962_tmpl_error; + + ret = crypto_register_template(&ecdsa_p1363_tmpl); + if (ret) + goto p1363_tmpl_error; + return 0; +p1363_tmpl_error: + crypto_unregister_template(&ecdsa_x962_tmpl); + +x962_tmpl_error: + crypto_unregister_sig(&ecdsa_nist_p521); + +nist_p521_error: + crypto_unregister_sig(&ecdsa_nist_p384); + nist_p384_error: 
- crypto_unregister_akcipher(&ecdsa_nist_p256); + crypto_unregister_sig(&ecdsa_nist_p256); nist_p256_error: if (ecdsa_nist_p192_registered) - crypto_unregister_akcipher(&ecdsa_nist_p192); + crypto_unregister_sig(&ecdsa_nist_p192); return ret; } static void __exit ecdsa_exit(void) { + crypto_unregister_template(&ecdsa_x962_tmpl); + crypto_unregister_template(&ecdsa_p1363_tmpl); + if (ecdsa_nist_p192_registered) - crypto_unregister_akcipher(&ecdsa_nist_p192); - crypto_unregister_akcipher(&ecdsa_nist_p256); - crypto_unregister_akcipher(&ecdsa_nist_p384); + crypto_unregister_sig(&ecdsa_nist_p192); + crypto_unregister_sig(&ecdsa_nist_p256); + crypto_unregister_sig(&ecdsa_nist_p384); + crypto_unregister_sig(&ecdsa_nist_p521); } -subsys_initcall(ecdsa_init); +module_init(ecdsa_init); module_exit(ecdsa_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>"); MODULE_DESCRIPTION("ECDSA generic algorithm"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p192"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p256"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p384"); +MODULE_ALIAS_CRYPTO("ecdsa-nist-p521"); MODULE_ALIAS_CRYPTO("ecdsa-generic"); diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 69686668625e..e0a2d3209938 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req) u64 seqno; u8 *info; unsigned int ivsize = crypto_aead_ivsize(geniv); - int err; if (req->cryptlen < ivsize) return -EINVAL; @@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req) info = req->iv; - if (req->src != req->dst) { - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - - skcipher_request_set_sync_tfm(nreq, ctx->sknull); - skcipher_request_set_callback(nreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, - req->assoclen + req->cryptlen, - NULL); - - err = crypto_skcipher_encrypt(nreq); - if (err) - return err; - } + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, + req->assoclen + req->cryptlen); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); @@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void) crypto_unregister_template(&echainiv_tmpl); } -subsys_initcall(echainiv_module_init); +module_init(echainiv_module_init); module_exit(echainiv_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c index f3c6b5e15e75..2c0602f0cd40 100644 --- a/crypto/ecrdsa.c +++ b/crypto/ecrdsa.c @@ -18,12 +18,11 @@ #include <linux/module.h> #include <linux/crypto.h> +#include <crypto/sig.h> #include <crypto/streebog.h> -#include <crypto/internal/akcipher.h> #include <crypto/internal/ecc.h> -#include <crypto/akcipher.h> +#include <crypto/internal/sig.h> #include <linux/oid_registry.h> -#include <linux/scatterlist.h> #include "ecrdsa_params.asn1.h" #include "ecrdsa_pub_key.asn1.h" #include "ecrdsa_defs.h" @@ -68,13 +67,12 @@ static const struct ecc_curve *get_curve_by_oid(enum OID oid) } } -static int ecrdsa_verify(struct akcipher_request *req) +static int ecrdsa_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) { - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm); - unsigned char sig[ECRDSA_MAX_SIG_SIZE]; - unsigned char digest[STREEBOG512_DIGEST_SIZE]; - unsigned int ndigits = req->dst_len / sizeof(u64); + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int ndigits = dlen / sizeof(u64); u64 
r[ECRDSA_MAX_DIGITS]; /* witness (r) */ u64 _r[ECRDSA_MAX_DIGITS]; /* -r */ u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */ @@ -91,25 +89,19 @@ static int ecrdsa_verify(struct akcipher_request *req) */ if (!ctx->curve || !ctx->digest || - !req->src || + !src || + !digest || !ctx->pub_key.x || - req->dst_len != ctx->digest_len || - req->dst_len != ctx->curve->g.ndigits * sizeof(u64) || + dlen != ctx->digest_len || + dlen != ctx->curve->g.ndigits * sizeof(u64) || ctx->pub_key.ndigits != ctx->curve->g.ndigits || - req->dst_len * 2 != req->src_len || - WARN_ON(req->src_len > sizeof(sig)) || - WARN_ON(req->dst_len > sizeof(digest))) + dlen * 2 != slen || + WARN_ON(slen > ECRDSA_MAX_SIG_SIZE) || + WARN_ON(dlen > STREEBOG512_DIGEST_SIZE)) return -EBADMSG; - sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len), - sig, req->src_len); - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, - req->src_len + req->dst_len), - digest, req->dst_len, req->src_len); - - vli_from_be64(s, sig, ndigits); - vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits); + vli_from_be64(s, src, ndigits); + vli_from_be64(r, src + ndigits * sizeof(u64), ndigits); /* Step 1: verify that 0 < r < q, 0 < s < q */ if (vli_is_zero(r, ndigits) || @@ -188,10 +180,10 @@ static u8 *ecrdsa_unpack_u32(u32 *dst, void *src) } /* Parse BER encoded subjectPublicKey. */ -static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, +static int ecrdsa_set_pub_key(struct crypto_sig *tfm, const void *key, unsigned int keylen) { - struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); unsigned int ndigits; u32 algo, paramlen; u8 *params; @@ -249,24 +241,32 @@ static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, return 0; } -static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm) +static unsigned int ecrdsa_key_size(struct crypto_sig *tfm) { - struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); /* * Verify doesn't need any output, so it's just informational * for keyctl to determine the key bit size. 
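
The two returns that follow split what used to be a single byte count: key_size now reports the key strength in bits while max_size reports the raw signature length in bytes. With concrete numbers for a 256-bit GOST curve (ndigits == 4; illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned int ndigits = 4;                       /* 256-bit curve */
        unsigned int key_bits = ndigits * 8 * 8;        /* key_size: 256 */
        unsigned int sig_bytes = 2 * ndigits * 8;       /* max_size: 64 */

        printf("%u-bit key, %u-byte signature\n", key_bits, sig_bytes);
        return 0;
}
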
*/ - return ctx->pub_key.ndigits * sizeof(u64); + return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE; +} + +static unsigned int ecrdsa_max_size(struct crypto_sig *tfm) +{ + struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm); + + return 2 * ctx->pub_key.ndigits * sizeof(u64); } -static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm) +static void ecrdsa_exit_tfm(struct crypto_sig *tfm) { } -static struct akcipher_alg ecrdsa_alg = { +static struct sig_alg ecrdsa_alg = { .verify = ecrdsa_verify, .set_pub_key = ecrdsa_set_pub_key, + .key_size = ecrdsa_key_size, .max_size = ecrdsa_max_size, .exit = ecrdsa_exit_tfm, .base = { @@ -280,12 +280,12 @@ static struct akcipher_alg ecrdsa_alg = { static int __init ecrdsa_mod_init(void) { - return crypto_register_akcipher(&ecrdsa_alg); + return crypto_register_sig(&ecrdsa_alg); } static void __exit ecrdsa_mod_fini(void) { - crypto_unregister_akcipher(&ecrdsa_alg); + crypto_unregister_sig(&ecrdsa_alg); } module_init(ecrdsa_mod_init); @@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>"); MODULE_DESCRIPTION("EC-RDSA generic algorithm"); +MODULE_ALIAS_CRYPTO("ecrdsa"); MODULE_ALIAS_CRYPTO("ecrdsa-generic"); diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h index 0056335b9d03..1c2c2449e331 100644 --- a/crypto/ecrdsa_defs.h +++ b/crypto/ecrdsa_defs.h @@ -47,6 +47,7 @@ static u64 cp256a_b[] = { static struct ecc_curve gost_cp256a = { .name = "cp256a", + .nbits = 256, .g = { .x = cp256a_g_x, .y = cp256a_g_y, @@ -80,6 +81,7 @@ static u64 cp256b_b[] = { static struct ecc_curve gost_cp256b = { .name = "cp256b", + .nbits = 256, .g = { .x = cp256b_g_x, .y = cp256b_g_y, @@ -117,6 +119,7 @@ static u64 cp256c_b[] = { static struct ecc_curve gost_cp256c = { .name = "cp256c", + .nbits = 256, .g = { .x = cp256c_g_x, .y = cp256c_g_y, @@ -166,6 +169,7 @@ static u64 tc512a_b[] = { static struct ecc_curve gost_tc512a = { .name = "tc512a", + .nbits = 512, .g = { .x = tc512a_g_x, .y = tc512a_g_y, @@ -211,6 +215,7 @@ static u64 tc512b_b[] = { static struct ecc_curve gost_tc512b = { .name = "tc512b", + .nbits = 512, .g = { .x = tc512b_g_x, .y = tc512b_g_y, diff --git a/crypto/essiv.c b/crypto/essiv.c index e63fc6442e32..d003b78fcd85 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -405,8 +405,7 @@ static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name) if (len >= CRYPTO_MAX_ALG_NAME) return false; - memcpy(essiv_cipher_name, p, len); - essiv_cipher_name[len] = '\0'; + strscpy(essiv_cipher_name, p, len + 1); return true; } @@ -549,8 +548,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) } /* record the driver name so we can instantiate this exact algo later */ - strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, - CRYPTO_MAX_ALG_NAME); + strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name); /* Instance fields */ @@ -643,10 +641,10 @@ static void __exit essiv_module_exit(void) crypto_unregister_template(&essiv_tmpl); } -subsys_initcall(essiv_module_init); +module_init(essiv_module_init); module_exit(essiv_module_exit); MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("essiv"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 95a16e88899b..80036835cec5 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void) 
crypto_unregister_alg(&fcrypt_alg); } -subsys_initcall(fcrypt_mod_init); +module_init(fcrypt_mod_init); module_exit(fcrypt_mod_fini); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/fips.c b/crypto/fips.c index 92fd506abb21..e88a604cb42b 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/sysctl.h> #include <linux/notifier.h> +#include <linux/string_choices.h> #include <generated/utsrelease.h> int fips_enabled; @@ -24,8 +25,7 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain); static int fips_enable(char *str) { fips_enabled = !!simple_strtol(str, NULL, 0); - printk(KERN_INFO "fips mode: %s\n", - fips_enabled ? "enabled" : "disabled"); + pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled)); return 1; } @@ -41,7 +41,7 @@ __setup("fips=", fips_enable); static char fips_name[] = FIPS_MODULE_NAME; static char fips_version[] = FIPS_MODULE_VERSION; -static struct ctl_table crypto_sysctl_table[] = { +static const struct ctl_table crypto_sysctl_table[] = { { .procname = "fips_enabled", .data = &fips_enabled, @@ -63,7 +63,6 @@ static struct ctl_table crypto_sysctl_table[] = { .mode = 0444, .proc_handler = proc_dostring }, - {} }; static struct ctl_table_header *crypto_sysctls; @@ -96,5 +95,5 @@ static void __exit fips_exit(void) crypto_proc_fips_exit(); } -subsys_initcall(fips_init); +module_init(fips_init); module_exit(fips_exit); diff --git a/crypto/gcm.c b/crypto/gcm.c index 84f7c23d14e4..97716482bed0 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -9,7 +9,6 @@ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <crypto/gcm.h> #include <crypto/hash.h> @@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx { struct crypto_rfc4543_ctx { struct crypto_aead *child; - struct crypto_sync_skcipher *null; u8 nonce[4]; }; @@ -79,8 +77,6 @@ static struct { struct scatterlist sg; } *gcm_zeroes; -static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc); - static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( struct aead_request *req) { @@ -930,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) unsigned int authsize = crypto_aead_authsize(aead); u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), crypto_aead_alignmask(ctx->child) + 1); - int err; if (req->src != req->dst) { - err = crypto_rfc4543_copy_src_to_dst(req, enc); - if (err) - return err; + unsigned int nbytes = req->assoclen + req->cryptlen - + (enc ? 0 : authsize); + + memcpy_sglist(req->dst, req->src, nbytes); } memcpy(iv, ctx->nonce, 4); @@ -952,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); } -static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); - unsigned int authsize = crypto_aead_authsize(aead); - unsigned int nbytes = req->assoclen + req->cryptlen - - (enc ? 
0 : authsize); - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); - - skcipher_request_set_sync_tfm(nreq, ctx->null); - skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); - - return crypto_skcipher_encrypt(nreq); -} - static int crypto_rfc4543_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: @@ -987,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) struct crypto_aead_spawn *spawn = &ictx->aead; struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; - struct crypto_sync_skcipher *null; unsigned long align; - int err = 0; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_aead; - ctx->child = aead; - ctx->null = null; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); @@ -1012,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) align + GCM_AES_IV_SIZE); return 0; - -err_free_aead: - crypto_free_aead(aead); - return err; } static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) @@ -1023,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); } static void crypto_rfc4543_free(struct aead_instance *inst) @@ -1152,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void) ARRAY_SIZE(crypto_gcm_tmpls)); } -subsys_initcall(crypto_gcm_module_init); +module_init(crypto_gcm_module_init); module_exit(crypto_gcm_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/geniv.c b/crypto/geniv.c index bee4621b4f12..42eff6a7387c 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -9,7 +9,6 @@ #include <crypto/internal/geniv.h> #include <crypto/internal/rng.h> -#include <crypto/null.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> @@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead) if (err) goto out; - ctx->sknull = crypto_get_default_null_skcipher(); - err = PTR_ERR(ctx->sknull); - if (IS_ERR(ctx->sknull)) - goto out; - child = crypto_spawn_aead(aead_instance_ctx(inst)); err = PTR_ERR(child); if (IS_ERR(child)) - goto drop_null; + goto out; ctx->child = child; crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) + @@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead) out: return err; - -drop_null: - crypto_put_default_null_skcipher(); - goto out; } EXPORT_SYMBOL_GPL(aead_init_geniv); @@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm) struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index c70d163c1ac9..e5803c249c12 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -34,14 +34,14 @@ * (https://csrc.nist.gov/publications/detail/sp/800-38d/final) */ -#include <crypto/algapi.h> #include <crypto/gf128mul.h> #include <crypto/ghash.h> #include <crypto/internal/hash.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/utils.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> static int ghash_init(struct shash_desc *desc) { @@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc, 
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; - if (dctx->bytes) { - int n = min(srclen, dctx->bytes); - u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos++ ^= *src++; - - if (!dctx->bytes) - gf128mul_4k_lle((be128 *)dst, ctx->gf128); - } - - while (srclen >= GHASH_BLOCK_SIZE) { + do { crypto_xor(dst, src, GHASH_BLOCK_SIZE); gf128mul_4k_lle((be128 *)dst, ctx->gf128); src += GHASH_BLOCK_SIZE; srclen -= GHASH_BLOCK_SIZE; - } - - if (srclen) { - dctx->bytes = GHASH_BLOCK_SIZE - srclen; - while (srclen--) - *dst++ ^= *src++; - } + } while (srclen >= GHASH_BLOCK_SIZE); - return 0; + return srclen; } -static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) +static void ghash_flush(struct shash_desc *desc, const u8 *src, + unsigned int len) { + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); u8 *dst = dctx->buffer; - if (dctx->bytes) { - u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - while (dctx->bytes--) - *tmp++ ^= 0; - + if (len) { + crypto_xor(dst, src, len); gf128mul_4k_lle((be128 *)dst, ctx->gf128); } - - dctx->bytes = 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; - ghash_flush(ctx, dctx); + ghash_flush(desc, src, len); memcpy(dst, buf, GHASH_BLOCK_SIZE); return 0; @@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, @@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -subsys_initcall(ghash_mod_init); +module_init(ghash_mod_init); module_exit(ghash_mod_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/hash.h b/crypto/hash.h index 93f6ba0df263..cf9aee07f77d 100644 --- a/crypto/hash.h +++ b/crypto/hash.h @@ -8,39 +8,9 @@ #define _LOCAL_CRYPTO_HASH_H #include <crypto/internal/hash.h> -#include <linux/cryptouser.h> #include "internal.h" -static inline struct crypto_istat_hash *hash_get_stat( - struct hash_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_hash_report_stat(struct sk_buff *skb, - struct crypto_alg *alg, - const char *type) -{ - struct hash_alg_common *halg = __crypto_hash_alg_common(alg); - struct crypto_istat_hash *istat = hash_get_stat(halg); - struct crypto_stat_hash rhash; - - memset(&rhash, 0, sizeof(rhash)); - - strscpy(rhash.type, type, sizeof(rhash.type)); - - rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt); - rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen); - rhash.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); -} - extern const struct crypto_type crypto_shash_type; int hash_prepare_alg(struct hash_alg_common *alg); diff --git a/crypto/hctr2.c b/crypto/hctr2.c index 87e7547ad186..c8932777bba8 100644 --- a/crypto/hctr2.c +++ 
b/crypto/hctr2.c @@ -570,10 +570,10 @@ static void __exit hctr2_module_exit(void) ARRAY_SIZE(hctr2_tmpls)); } -subsys_initcall(hctr2_module_init); +module_init(hctr2_module_init); module_exit(hctr2_module_exit); MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("hctr2"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/hkdf.c b/crypto/hkdf.c new file mode 100644 index 000000000000..82d1b32ca6ce --- /dev/null +++ b/crypto/hkdf.c @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation + * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010): + * "Cryptographic Extraction and Key Derivation: The HKDF Scheme". + * + * Copyright 2019 Google LLC + */ + +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <crypto/hkdf.h> +#include <linux/module.h> + +/* + * HKDF consists of two steps: + * + * 1. HKDF-Extract: extract a pseudorandom key from the input keying material + * and optional salt. + * 2. HKDF-Expand: expand the pseudorandom key into output keying material of + * any length, parameterized by an application-specific info string. + * + */ + +/** + * hkdf_extract - HKDF-Extract (RFC 5869 section 2.2) + * @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The + * caller is responsible for setting the @prk afterwards. + * @ikm: input keying material + * @ikmlen: length of @ikm + * @salt: input salt value + * @saltlen: length of @salt + * @prk: resulting pseudorandom key + * + * Extracts a pseudorandom key @prk from the input keying material + * @ikm with length @ikmlen and salt @salt with length @saltlen. + * The length of @prk is given by the digest size of @hmac_tfm. + * For an 'unsalted' version of HKDF-Extract @salt must be set + * to all zeroes and @saltlen must be set to the length of @prk. + * + * Returns 0 on success with the pseudorandom key stored in @prk, + * or a negative errno value otherwise. + */ +int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm, + unsigned int ikmlen, const u8 *salt, unsigned int saltlen, + u8 *prk) +{ + int err; + + err = crypto_shash_setkey(hmac_tfm, salt, saltlen); + if (!err) + err = crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk); + + return err; +} +EXPORT_SYMBOL_GPL(hkdf_extract); + +/** + * hkdf_expand - HKDF-Expand (RFC 5869 section 2.3) + * @hmac_tfm: hash context keyed with pseudorandom key + * @info: application-specific information + * @infolen: length of @info + * @okm: output keying material + * @okmlen: length of @okm + * + * This expands the pseudorandom key, which was already keyed into @hmac_tfm, + * into @okmlen bytes of output keying material parameterized by the + * application-specific @info of length @infolen bytes. + * This is thread-safe and may be called by multiple threads in parallel. + * + * Returns 0 on success with output keying material stored in @okm, + * or a negative errno value otherwise. 
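
Taken together, the two helpers give the usual extract-then-expand flow. A hypothetical caller (a sketch assuming "hmac(sha256)" is available; note the explicit re-key with the PRK between the two steps, since hkdf_extract() leaves the tfm keyed with the salt):

#include <crypto/hash.h>
#include <crypto/hkdf.h>
#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/string.h>

static int derive_key_example(const u8 *ikm, unsigned int ikmlen,
                              const u8 *salt, unsigned int saltlen,
                              const u8 *info, unsigned int infolen,
                              u8 *okm, unsigned int okmlen)
{
        struct crypto_shash *tfm;
        u8 prk[SHA256_DIGEST_SIZE];
        int err;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Step 1: PRK = HMAC(salt, IKM) */
        err = hkdf_extract(tfm, ikm, ikmlen, salt, saltlen, prk);
        if (err)
                goto out;

        /* key the transform with the PRK before expanding */
        err = crypto_shash_setkey(tfm, prk, sizeof(prk));
        if (err)
                goto out;

        /* Step 2: OKM = first okmlen bytes of T(1) || T(2) || ... */
        err = hkdf_expand(tfm, info, infolen, okm, okmlen);
out:
        memzero_explicit(prk, sizeof(prk));
        crypto_free_shash(tfm);
        return err;
}
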
+ */ +int hkdf_expand(struct crypto_shash *hmac_tfm, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen) +{ + SHASH_DESC_ON_STACK(desc, hmac_tfm); + unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm); + int err; + const u8 *prev = NULL; + u8 counter = 1; + u8 tmp[HASH_MAX_DIGESTSIZE] = {}; + + if (WARN_ON(okmlen > 255 * hashlen)) + return -EINVAL; + + desc->tfm = hmac_tfm; + + for (i = 0; i < okmlen; i += hashlen) { + err = crypto_shash_init(desc); + if (err) + goto out; + + if (prev) { + err = crypto_shash_update(desc, prev, hashlen); + if (err) + goto out; + } + + if (infolen) { + err = crypto_shash_update(desc, info, infolen); + if (err) + goto out; + } + + BUILD_BUG_ON(sizeof(counter) != 1); + if (okmlen - i < hashlen) { + err = crypto_shash_finup(desc, &counter, 1, tmp); + if (err) + goto out; + memcpy(&okm[i], tmp, okmlen - i); + memzero_explicit(tmp, sizeof(tmp)); + } else { + err = crypto_shash_finup(desc, &counter, 1, &okm[i]); + if (err) + goto out; + } + counter++; + prev = &okm[i]; + } + err = 0; +out: + if (unlikely(err)) + memzero_explicit(okm, okmlen); /* so caller doesn't need to */ + shash_desc_zero(desc); + memzero_explicit(tmp, HASH_MAX_DIGESTSIZE); + return err; +} +EXPORT_SYMBOL_GPL(hkdf_expand); + +struct hkdf_testvec { + const char *test; + const u8 *ikm; + const u8 *salt; + const u8 *info; + const u8 *prk; + const u8 *okm; + u16 ikm_size; + u16 salt_size; + u16 info_size; + u16 prk_size; + u16 okm_size; +}; + +/* + * HKDF test vectors from RFC5869 + * + * Additional HKDF test vectors from + * https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md + */ +static const struct hkdf_testvec hkdf_sha256_tv[] = { + { + .test = "basic hdkf test", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63" + "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5", + .prk_size = 32, + .okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a" + "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf" + "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65", + .okm_size = 42, + }, { + .test = "hkdf test with long input", + .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f", + .ikm_size = 80, + .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf", + .salt_size = 80, + .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" + 
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .info_size = 80, + .prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c" + "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44", + .prk_size = 32, + .okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34" + "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c" + "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09" + "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71" + "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f" + "\x1d\x87", + .okm_size = 82, + }, { + .test = "hkdf test with zero salt and info", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = NULL, + .salt_size = 0, + .info = NULL, + .info_size = 0, + .prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf" + "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04", + .prk_size = 32, + .okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31" + "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d" + "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8", + .okm_size = 42, + }, { + .test = "hkdf test with short input", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 11, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c" + "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf", + .prk_size = 32, + .okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43" + "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d" + "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5", + .okm_size = 42, + }, { + .test = "unsalted hkdf test with zero info", + .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c" + "\x0c\x0c\x0c\x0c\x0c\x0c", + .ikm_size = 22, + .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + .salt_size = 32, + .info = NULL, + .info_size = 0, + .prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d" + "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf", + .prk_size = 32, + .okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53" + "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a" + "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7", + .okm_size = 42, + } +}; + +static const struct hkdf_testvec hkdf_sha384_tv[] = { + { + .test = "basic hkdf test", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30" + "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde" + "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde", + .prk_size = 48, + .okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38" + "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7" + "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5", + .okm_size = 42, + }, { + .test = "hkdf test with long input", + .ikm = 
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f", + .ikm_size = 80, + .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf", + .salt_size = 80, + .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .info_size = 80, + .prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3" + "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b" + "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb", + .prk_size = 48, + .okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81" + "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee" + "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49" + "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6" + "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a" + "\x8b\x2e", + .okm_size = 82, + }, { + .test = "hkdf test with zero salt and info", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = NULL, + .salt_size = 0, + .info = NULL, + .info_size = 0, + .prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d" + "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b" + "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5", + .prk_size = 48, + .okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf" + "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa" + "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c", + .okm_size = 42, + }, { + .test = "hkdf test with short input", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 11, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0" + "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a" + "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72", + .prk_size = 48, + .okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75" + "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1" + "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a", + .okm_size = 42, + }, { + .test = "unsalted hkdf test with zero info", + .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c" + "\x0c\x0c\x0c\x0c\x0c\x0c", + .ikm_size = 22, + .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + .salt_size = 48, + 
.info = NULL, + .info_size = 0, + .prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e" + "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc" + "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5", + .prk_size = 48, + .okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78" + "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b" + "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53", + .okm_size = 42, + } +}; + +static const struct hkdf_testvec hkdf_sha512_tv[] = { + { + .test = "basic hkdf test", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b" + "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26" + "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f" + "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37", + .prk_size = 64, + .okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4" + "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93" + "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb", + .okm_size = 42, + }, { + .test = "hkdf test with long input", + .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f", + .ikm_size = 80, + .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf", + .salt_size = 80, + .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .info_size = 80, + .prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d" + "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e" + "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe" + "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a", + .prk_size = 64, + .okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67" + "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1" + "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a" + "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35" + "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e" + "\x7a\x93", + .okm_size = 82, + }, { + .test = "hkdf test with zero salt and info", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 22, + .salt = NULL, + .salt_size = 0, + .info = NULL, + .info_size = 0, + .prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21" + 
"\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b" + "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde" + "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6", + .prk_size = 64, + .okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c" + "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f" + "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac", + .okm_size = 42, + }, { + .test = "hkdf test with short input", + .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ikm_size = 11, + .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c", + .salt_size = 13, + .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9", + .info_size = 10, + .prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f" + "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f" + "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2" + "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d", + .prk_size = 64, + .okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff" + "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35" + "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68", + .okm_size = 42, + }, { + .test = "unsalted hkdf test with zero info", + .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c" + "\x0c\x0c\x0c\x0c\x0c\x0c", + .ikm_size = 22, + .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + .salt_size = 64, + .info = NULL, + .info_size = 0, + .prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89" + "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa" + "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1" + "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c", + .prk_size = 64, + .okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90" + "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a" + "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb", + .okm_size = 42, + } +}; + +static int hkdf_test(const char *shash, const struct hkdf_testvec *tv) +{ struct crypto_shash *tfm = NULL; + u8 *prk = NULL, *okm = NULL; + unsigned int prk_size; + const char *driver; + int err; + + tfm = crypto_alloc_shash(shash, 0, 0); + if (IS_ERR(tfm)) { + pr_err("%s(%s): failed to allocate transform: %ld\n", + tv->test, shash, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + driver = crypto_shash_driver_name(tfm); + + prk_size = crypto_shash_digestsize(tfm); + prk = kzalloc(prk_size, GFP_KERNEL); + if (!prk) { + err = -ENOMEM; + goto out_free; + } + + if (tv->prk_size != prk_size) { + pr_err("%s(%s): prk size mismatch (vec %u, digest %u\n", + tv->test, driver, tv->prk_size, prk_size); + err = -EINVAL; + goto out_free; + } + + err = hkdf_extract(tfm, tv->ikm, tv->ikm_size, + tv->salt, tv->salt_size, prk); + if (err) { + pr_err("%s(%s): hkdf_extract failed with %d\n", + tv->test, driver, err); + goto out_free; + } + + if (memcmp(prk, tv->prk, tv->prk_size)) { + pr_err("%s(%s): hkdf_extract prk mismatch\n", + tv->test, driver); + print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE, + 16, 1, prk, tv->prk_size, false); + err = -EINVAL; + goto out_free; + } + + okm = kzalloc(tv->okm_size, GFP_KERNEL); + if (!okm) { + err = -ENOMEM; + goto out_free; + } + + err = 
crypto_shash_setkey(tfm, tv->prk, tv->prk_size); + if (err) { + pr_err("%s(%s): failed to set prk, error %d\n", + tv->test, driver, err); + goto out_free; + } + + err = hkdf_expand(tfm, tv->info, tv->info_size, + okm, tv->okm_size); + if (err) { + pr_err("%s(%s): hkdf_expand() failed with %d\n", + tv->test, driver, err); + } else if (memcmp(okm, tv->okm, tv->okm_size)) { + pr_err("%s(%s): hkdf_expand() okm mismatch\n", + tv->test, driver); + print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE, + 16, 1, okm, tv->okm_size, false); + err = -EINVAL; + } +out_free: + kfree(okm); + kfree(prk); + crypto_free_shash(tfm); + return err; +} + +static int __init crypto_hkdf_module_init(void) +{ + int ret = 0, i; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) + return 0; + + for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) { + ret = hkdf_test("hmac(sha256)", &hkdf_sha256_tv[i]); + if (ret) + return ret; + } + for (i = 0; i < ARRAY_SIZE(hkdf_sha384_tv); i++) { + ret = hkdf_test("hmac(sha384)", &hkdf_sha384_tv[i]); + if (ret) + return ret; + } + for (i = 0; i < ARRAY_SIZE(hkdf_sha512_tv); i++) { + ret = hkdf_test("hmac(sha512)", &hkdf_sha512_tv[i]); + if (ret) + return ret; + } + return 0; +} + +static void __exit crypto_hkdf_module_exit(void) {} + +late_initcall(crypto_hkdf_module_init); +module_exit(crypto_hkdf_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)"); diff --git a/crypto/hmac.c b/crypto/hmac.c index 7cec25ff9889..148af460ae97 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -13,13 +13,11 @@ #include <crypto/hmac.h> #include <crypto/internal/hash.h> -#include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/fips.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/string.h> struct hmac_ctx { @@ -28,6 +26,12 @@ struct hmac_ctx { u8 pads[]; }; +struct ahash_hmac_ctx { + struct crypto_ahash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; +}; + static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -39,7 +43,7 @@ static int hmac_setkey(struct crypto_shash *parent, u8 *ipad = &tctx->pads[0]; u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); - unsigned int i; + int err, i; if (fips_enabled && (keylen < 112 / 8)) return -EINVAL; @@ -65,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent, opad[i] ^= HMAC_OPAD_VALUE; } - return crypto_shash_init(shash) ?: - crypto_shash_update(shash, ipad, bs) ?: - crypto_shash_export(shash, ipad) ?: - crypto_shash_init(shash) ?: - crypto_shash_update(shash, opad, bs) ?: - crypto_shash_export(shash, opad); + err = crypto_shash_init(shash) ?: + crypto_shash_update(shash, ipad, bs) ?: + crypto_shash_export(shash, ipad) ?: + crypto_shash_init(shash) ?: + crypto_shash_update(shash, opad, bs) ?: + crypto_shash_export(shash, opad); + shash_desc_zero(shash); + return err; } static int hmac_export(struct shash_desc *pdesc, void *out) @@ -90,6 +96,22 @@ static int hmac_import(struct shash_desc *pdesc, const void *in) return crypto_shash_import(desc, in); } +static int hmac_export_core(struct shash_desc *pdesc, void *out) +{ + struct shash_desc *desc = shash_desc_ctx(pdesc); + + return crypto_shash_export_core(desc, out); +} + +static int hmac_import_core(struct shash_desc *pdesc, const void *in) +{ + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); + struct shash_desc *desc = shash_desc_ctx(pdesc); + + 
desc->tfm = tctx->hash; + return crypto_shash_import_core(desc, in); +} + static int hmac_init(struct shash_desc *pdesc) { const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); @@ -105,20 +127,6 @@ static int hmac_update(struct shash_desc *pdesc, return crypto_shash_update(desc, data, nbytes); } -static int hmac_final(struct shash_desc *pdesc, u8 *out) -{ - struct crypto_shash *parent = pdesc->tfm; - int ds = crypto_shash_digestsize(parent); - int ss = crypto_shash_statesize(parent); - const struct hmac_ctx *tctx = crypto_shash_ctx(parent); - const u8 *opad = &tctx->pads[ss]; - struct shash_desc *desc = shash_desc_ctx(pdesc); - - return crypto_shash_final(desc, out) ?: - crypto_shash_import(desc, opad) ?: - crypto_shash_finup(desc, out, ds, out); -} - static int hmac_finup(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes, u8 *out) { @@ -146,9 +154,6 @@ static int hmac_init_tfm(struct crypto_shash *parent) if (IS_ERR(hash)) return PTR_ERR(hash); - parent->descsize = sizeof(struct shash_desc) + - crypto_shash_descsize(hash); - tctx->hash = hash; return 0; } @@ -174,26 +179,23 @@ static void hmac_exit_tfm(struct crypto_shash *parent) crypto_free_shash(tctx->hash); } -static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +static int __hmac_create_shash(struct crypto_template *tmpl, + struct rtattr **tb, u32 mask) { struct shash_instance *inst; struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; - u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); - if (err) - return err; - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = shash_instance_ctx(inst); + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; err = crypto_grab_shash(spawn, shash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -212,7 +214,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) ss < alg->cra_blocksize) goto err_free_inst; - err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); + err = crypto_inst_setname(shash_crypto_instance(inst), "hmac", + "hmac-shash", alg); if (err) goto err_free_inst; @@ -222,12 +225,14 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.digestsize = ds; inst->alg.statesize = ss; + inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize; inst->alg.init = hmac_init; inst->alg.update = hmac_update; - inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; + inst->alg.export_core = hmac_export_core; + inst->alg.import_core = hmac_import_core; inst->alg.setkey = hmac_setkey; inst->alg.init_tfm = hmac_init_tfm; inst->alg.clone_tfm = hmac_clone_tfm; @@ -243,23 +248,332 @@ err_free_inst: return err; } -static struct crypto_template hmac_tmpl = { - .name = "hmac", - .create = hmac_create, - .module = THIS_MODULE, +static int hmac_setkey_ahash(struct crypto_ahash *parent, + const u8 *inkey, unsigned int keylen) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash); + int ds = crypto_ahash_digestsize(parent); + int bs = crypto_ahash_blocksize(parent); + int ss = crypto_ahash_statesize(parent); + HASH_REQUEST_ON_STACK(req, fb); + u8 *opad = &tctx->pads[ss]; + u8 *ipad = &tctx->pads[0]; + int err, i; + + if (fips_enabled && (keylen < 112 / 8)) + return -EINVAL; + + ahash_request_set_callback(req, 0, NULL, NULL); + + if 
(keylen > bs) { + ahash_request_set_virt(req, inkey, ipad, keylen); + err = crypto_ahash_digest(req); + if (err) + goto out_zero_req; + + keylen = ds; + } else + memcpy(ipad, inkey, keylen); + + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + ahash_request_set_virt(req, ipad, NULL, bs); + err = crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, ipad); + + ahash_request_set_virt(req, opad, NULL, bs); + err = err ?: + crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, opad); + +out_zero_req: + HASH_REQUEST_ZERO(req); + return err; +} + +static int hmac_export_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export(ahash_request_ctx(preq), out); +} + +static int hmac_import_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import(req, in); +} + +static int hmac_export_core_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export_core(ahash_request_ctx(preq), out); +} + +static int hmac_import_core_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import_core(req, in); +} + +static int hmac_init_ahash(struct ahash_request *preq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + + return hmac_import_ahash(preq, &tctx->pads[0]); +} + +static int hmac_update_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + preq->base.complete, preq->base.data); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes); + return crypto_ahash_update(req); +} + +static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_request *req = ahash_request_ctx(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + int ds = crypto_ahash_digestsize(tfm); + int ss = crypto_ahash_statesize(tfm); + const u8 *opad = &tctx->pads[ss]; + + ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask, + preq->base.complete, preq->base.data); + ahash_request_set_virt(req, preq->result, preq->result, ds); + return crypto_ahash_import(req, opad) ?: + crypto_ahash_finup(req); + +} + +static void hmac_finup_done(void *data, int err) +{ + struct ahash_request *preq = data; + + if (err) + goto out; + + err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + ahash_request_complete(preq, err); +} + +static int hmac_finup_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + hmac_finup_done, preq); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, preq->result, + preq->nbytes); + else + 
ahash_request_set_crypt(req, preq->src, preq->result, + preq->nbytes); + return crypto_ahash_finup(req) ?: + hmac_finup_finish(preq, 0); +} + +static int hmac_digest_ahash(struct ahash_request *preq) +{ + return hmac_init_ahash(preq) ?: + hmac_finup_ahash(preq); +} + +static int hmac_init_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_instance *inst = ahash_alg_instance(parent); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *hash; + + hash = crypto_spawn_ahash(ahash_instance_ctx(inst)); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) + + crypto_ahash_reqsize(hash)) + return -EINVAL; + + tctx->hash = hash; + return 0; +} + +static int hmac_clone_ahash_tfm(struct crypto_ahash *dst, + struct crypto_ahash *src) +{ + struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src); + struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst); + struct crypto_ahash *hash; + + hash = crypto_clone_ahash(sctx->hash); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + dctx->hash = hash; + return 0; +} + +static void hmac_exit_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + + crypto_free_ahash(tctx->hash); +} + +static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb, + u32 mask) +{ + struct crypto_ahash_spawn *spawn; + struct ahash_instance *inst; + struct crypto_alg *alg; + struct hash_alg_common *halg; + int ds, ss, err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = ahash_instance_ctx(inst); + + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + halg = crypto_spawn_ahash_alg(spawn); + alg = &halg->base; + + /* The underlying hash algorithm must not require a key */ + err = -EINVAL; + if (crypto_hash_alg_needs_key(halg)) + goto err_free_inst; + + ds = halg->digestsize; + ss = halg->statesize; + if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) + goto err_free_inst; + + err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg); + if (err) + goto err_free_inst; + + inst->alg.halg.base.cra_flags = alg->cra_flags & + CRYPTO_ALG_INHERITED_FLAGS; + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT; + inst->alg.halg.base.cra_priority = alg->cra_priority + 100; + inst->alg.halg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) + + (ss * 2); + inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) + + alg->cra_reqsize; + + inst->alg.halg.digestsize = ds; + inst->alg.halg.statesize = ss; + inst->alg.init = hmac_init_ahash; + inst->alg.update = hmac_update_ahash; + inst->alg.finup = hmac_finup_ahash; + inst->alg.digest = hmac_digest_ahash; + inst->alg.export = hmac_export_ahash; + inst->alg.import = hmac_import_ahash; + inst->alg.export_core = hmac_export_core_ahash; + inst->alg.import_core = hmac_import_core_ahash; + inst->alg.setkey = hmac_setkey_ahash; + inst->alg.init_tfm = hmac_init_ahash_tfm; + inst->alg.clone_tfm = hmac_clone_ahash_tfm; + inst->alg.exit_tfm = hmac_exit_ahash_tfm; + + inst->free = ahash_free_singlespawn_instance; + + err = ahash_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ahash_free_singlespawn_instance(inst); + } + return err; +} + +static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; 
+ u32 mask; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + mask = crypto_algt_inherited_mask(algt); + + if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK)) + return hmac_create_ahash(tmpl, tb, mask); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK) + return -EINVAL; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); + if (err) + return err == -EINVAL ? -ENOENT : err; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static struct crypto_template hmac_tmpls[] = { + { + .name = "hmac", + .create = hmac_create, + .module = THIS_MODULE, + }, + { + .name = "hmac-shash", + .create = hmac_create_shash, + .module = THIS_MODULE, + }, }; static int __init hmac_module_init(void) { - return crypto_register_template(&hmac_tmpl); + return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } static void __exit hmac_module_exit(void) { - crypto_unregister_template(&hmac_tmpl); + crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } -subsys_initcall(hmac_module_init); +module_init(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/internal.h b/crypto/internal.h index 63e59240d5fb..b9afd68767c1 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -22,8 +22,6 @@ #include <linux/sched.h> #include <linux/types.h> -struct akcipher_request; -struct crypto_akcipher; struct crypto_instance; struct crypto_template; @@ -35,17 +33,20 @@ struct crypto_larval { bool test_started; }; -struct crypto_akcipher_sync_data { - struct crypto_akcipher *tfm; - const void *src; - void *dst; - unsigned int slen; - unsigned int dlen; - - struct akcipher_request *req; - struct crypto_wait cwait; - struct scatterlist sg; - u8 *buf; +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + void (*destroy)(struct crypto_alg *alg); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; + unsigned int algsize; }; enum { @@ -66,7 +67,7 @@ extern struct blocking_notifier_head crypto_chain; int alg_test(const char *driver, const char *alg, u32 type, u32 mask); -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) static inline bool crypto_boot_test_finished(void) { return true; @@ -84,7 +85,9 @@ static inline void set_crypto_boot_test_finished(void) { static_branch_enable(&__crypto_boot_test_finished); } -#endif /* !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ +#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || + * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) + */ #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); @@ -110,8 +113,7 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); -void crypto_larval_kill(struct crypto_alg *alg); -void crypto_wait_for_test(struct crypto_larval *larval); +void crypto_schedule_test(struct 
crypto_larval *larval); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, @@ -127,10 +129,6 @@ void *crypto_create_tfm_node(struct crypto_alg *alg, void *crypto_clone_tfm(const struct crypto_type *frontend, struct crypto_tfm *otfm); -int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data); -int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err); -int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm); - static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) { @@ -164,10 +162,12 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) return alg; } +void crypto_destroy_alg(struct crypto_alg *alg); + static inline void crypto_alg_put(struct crypto_alg *alg) { - if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) - alg->cra_destroy(alg); + if (refcount_dec_and_test(&alg->cra_refcnt)) + crypto_destroy_alg(alg); } static inline int crypto_tmpl_get(struct crypto_template *tmpl) diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 76edbf8af0ac..c24d4ff2b4a8 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -61,8 +61,7 @@ void *jent_kvzalloc(unsigned int len) void jent_kvzfree(void *ptr, unsigned int len) { - memzero_explicit(ptr, len); - kvfree(ptr); + kvfree_sensitive(ptr, len); } void *jent_zalloc(unsigned int len) diff --git a/crypto/jitterentropy-testing.c b/crypto/jitterentropy-testing.c index 5cb6a77b8e3b..21c9d7c3269a 100644 --- a/crypto/jitterentropy-testing.c +++ b/crypto/jitterentropy-testing.c @@ -15,7 +15,7 @@ #define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1) struct jent_testing { - u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE]; + u64 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE]; u32 rb_reader; atomic_t rb_writer; atomic_t jent_testing_enabled; @@ -72,7 +72,7 @@ static void jent_testing_fini(struct jent_testing *data, u32 boot) pr_warn("Disabling data collection\n"); } -static bool jent_testing_store(struct jent_testing *data, u32 value, +static bool jent_testing_store(struct jent_testing *data, u64 value, u32 *boot) { unsigned long flags; @@ -156,20 +156,20 @@ static int jent_testing_reader(struct jent_testing *data, u32 *boot, } /* We copy out word-wise */ - if (outbuflen < sizeof(u32)) { + if (outbuflen < sizeof(u64)) { spin_unlock_irqrestore(&data->lock, flags); goto out; } memcpy(outbuf, &data->jent_testing_rb[data->rb_reader], - sizeof(u32)); + sizeof(u64)); data->rb_reader++; spin_unlock_irqrestore(&data->lock, flags); - outbuf += sizeof(u32); - outbuflen -= sizeof(u32); - collected_data += sizeof(u32); + outbuf += sizeof(u64); + outbuflen -= sizeof(u64); + collected_data += sizeof(u64); } out: @@ -189,16 +189,17 @@ static int jent_testing_extract_user(struct file *file, char __user *buf, /* * The intention of this interface is for collecting at least - * 1000 samples due to the SP800-90B requirements. So, we make no - * effort in avoiding allocating more memory that actually needed - * by the user. Hence, we allocate sufficient memory to always hold - * that amount of data. + * 1000 samples due to the SP800-90B requirements. However, due to + * memory and performance constraints, it is not desirable to allocate + * 8000 bytes of memory. Instead, we allocate space for only 125 + * samples, which will allow the user to collect all 1000 samples using + * 8 calls to this interface. 
*/ - tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL); + tmp = kmalloc(125 * sizeof(u64) + sizeof(u64), GFP_KERNEL); if (!tmp) return -ENOMEM; - tmp_aligned = PTR_ALIGN(tmp, sizeof(u32)); + tmp_aligned = PTR_ALIGN(tmp, sizeof(u64)); while (nbytes) { int i; @@ -212,7 +213,7 @@ static int jent_testing_extract_user(struct file *file, char __user *buf, schedule(); } - i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE); + i = min_t(int, nbytes, 125 * sizeof(u64)); i = reader(tmp_aligned, i); if (i <= 0) { if (i < 0) @@ -251,7 +252,7 @@ static struct jent_testing jent_raw_hires = { .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait) }; -int jent_raw_hires_entropy_store(__u32 value) +int jent_raw_hires_entropy_store(__u64 value) { return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test); } diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 26a9048bc893..3b390bd6c119 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -146,6 +146,7 @@ struct rand_data { #define JENT_ENTROPY_SAFETY_FACTOR 64 #include <linux/fips.h> +#include <linux/minmax.h> #include "jitterentropy.h" /*************************************************************************** @@ -157,8 +158,8 @@ struct rand_data { /* * See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B * APT. - * http://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf - * In in the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)). + * https://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf + * In the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)). * (The original formula wasn't correct because the first symbol must * necessarily have been observed, so there is no chance of observing 0 of these * symbols.) 
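The testing-interface changes above widen the raw samples from u32 to u64 and size the copy buffer for 125 samples, so the recommended 1000 samples arrive in eight reads of up to 1000 bytes each. A minimal userspace collector might look like the following sketch; the debugfs path is an assumption for illustration and should be checked against where jent_testing_init() registers the file:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; verify against the jitterentropy debugfs setup. */
	int fd = open("/sys/kernel/debug/jitterentropy_testing/jent_raw_hires",
		      O_RDONLY);
	uint64_t samples[1000];	/* SP800-90B wants at least 1000 samples */
	size_t have = 0;

	if (fd < 0)
		return 1;
	while (have < sizeof(samples)) {
		/* Each call returns at most 125 * sizeof(u64) == 1000 bytes. */
		ssize_t n = read(fd, (char *)samples + have,
				 sizeof(samples) - have);
		if (n <= 0)
			break;
		have += (size_t)n;
	}
	close(fd);
	printf("collected %zu raw samples\n", have / sizeof(uint64_t));
	return 0;
}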
@@ -638,10 +639,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, return -2; } - if ((DATA_SIZE_BITS / 8) < len) - tocopy = (DATA_SIZE_BITS / 8); - else - tocopy = len; + tocopy = min(DATA_SIZE_BITS / 8, len); if (jent_read_random_block(ec->hash_state, p, tocopy)) return -1; diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index aa4728675ae2..4c5dbf2a8d8f 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -22,11 +22,11 @@ extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr, extern void jent_entropy_collector_free(struct rand_data *entropy_collector); #ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE -int jent_raw_hires_entropy_store(__u32 value); +int jent_raw_hires_entropy_store(__u64 value); void jent_testing_init(void); void jent_testing_exit(void); #else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */ -static inline int jent_raw_hires_entropy_store(__u32 value) { return 0; } +static inline int jent_raw_hires_entropy_store(__u64 value) { return 0; } static inline void jent_testing_init(void) { } static inline void jent_testing_exit(void) { } #endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */ diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c index c3f9938e1ad2..b7a6bf9da773 100644 --- a/crypto/kdf_sp800108.c +++ b/crypto/kdf_sp800108.c @@ -127,7 +127,7 @@ static int __init crypto_kdf108_init(void) { int ret; - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return 0; ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)", diff --git a/crypto/keywrap.c b/crypto/keywrap.c deleted file mode 100644 index 054d9a216fc9..000000000000 --- a/crypto/keywrap.c +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Key Wrapping: RFC3394 / NIST SP800-38F - * - * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, and the entire permission notice in its entirety, - * including the disclaimer of warranties. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * ALTERNATIVELY, this product may be distributed under the terms of - * the GNU General Public License, in which case the provisions of the GPL2 - * are required INSTEAD OF the above restrictions. (This clause is - * necessary due to a potential bad interaction between the GPL and - * the restrictions contained in a BSD-style copyright.) - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF - * WHICH ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - */ - -/* - * Note for using key wrapping: - * - * * The result of the encryption operation is the ciphertext starting - * with the 2nd semiblock. The first semiblock is provided as the IV. - * The IV used to start the encryption operation is the default IV. - * - * * The input for the decryption is the first semiblock handed in as an - * IV. The ciphertext is the data starting with the 2nd semiblock. The - * return code of the decryption operation will be EBADMSG in case an - * integrity error occurs. - * - * To obtain the full result of an encryption as expected by SP800-38F, the - * caller must allocate a buffer of plaintext + 8 bytes: - * - * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm); - * u8 data[datalen]; - * u8 *iv = data; - * u8 *pt = data + crypto_skcipher_ivsize(tfm); - * <ensure that pt contains the plaintext of size ptlen> - * sg_init_one(&sg, pt, ptlen); - * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv); - * - * ==> After encryption, data now contains full KW result as per SP800-38F. - * - * In case of decryption, ciphertext now already has the expected length - * and must be segmented appropriately: - * - * unsigned int datalen = CTLEN; - * u8 data[datalen]; - * <ensure that data contains full ciphertext> - * u8 *iv = data; - * u8 *ct = data + crypto_skcipher_ivsize(tfm); - * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm); - * sg_init_one(&sg, ct, ctlen); - * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv); - * - * ==> After decryption (which hopefully does not return EBADMSG), the ct - * pointer now points to the plaintext of size ctlen. - * - * Note 2: KWP is not implemented as this would defy in-place operation. - * If somebody wants to wrap non-aligned data, he should simply pad - * the input with zeros to fill it up to the 8 byte boundary. - */ - -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/scatterlist.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/skcipher.h> - -struct crypto_kw_block { -#define SEMIBSIZE 8 - __be64 A; - __be64 R; -}; - -/* - * Fast forward the SGL to the "end" length minus SEMIBSIZE. - * The start in the SGL defined by the fast-forward is returned with - * the walk variable - */ -static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, - struct scatterlist *sg, - unsigned int end) -{ - unsigned int skip = 0; - - /* The caller should only operate on full SEMIBLOCKs. 
*/ - BUG_ON(end < SEMIBSIZE); - - skip = end - SEMIBSIZE; - while (sg) { - if (sg->length > skip) { - scatterwalk_start(walk, sg); - scatterwalk_advance(walk, skip); - break; - } - - skip -= sg->length; - sg = sg_next(sg); - } -} - -static int crypto_kw_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - struct crypto_kw_block block; - struct scatterlist *src, *dst; - u64 t = 6 * ((req->cryptlen) >> 3); - unsigned int i; - int ret = 0; - - /* - * Require at least 2 semiblocks (note, the 3rd semiblock that is - * required by SP800-38F is the IV. - */ - if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) - return -EINVAL; - - /* Place the IV into block A */ - memcpy(&block.A, req->iv, SEMIBSIZE); - - /* - * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, src points to req->src and dst to req->dst. For any - * subsequent round, the code operates on req->dst only. - */ - src = req->src; - dst = req->dst; - - for (i = 0; i < 6; i++) { - struct scatter_walk src_walk, dst_walk; - unsigned int nbytes = req->cryptlen; - - while (nbytes) { - /* move pointer by nbytes in the SGL */ - crypto_kw_scatterlist_ff(&src_walk, src, nbytes); - /* get the source block */ - scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, - false); - - /* perform KW operation: modify IV with counter */ - block.A ^= cpu_to_be64(t); - t--; - /* perform KW operation: decrypt block */ - crypto_cipher_decrypt_one(cipher, (u8 *)&block, - (u8 *)&block); - - /* move pointer by nbytes in the SGL */ - crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes); - /* Copy block->R into place */ - scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, - true); - - nbytes -= SEMIBSIZE; - } - - /* we now start to operate on the dst SGL only */ - src = req->dst; - dst = req->dst; - } - - /* Perform authentication check */ - if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL)) - ret = -EBADMSG; - - memzero_explicit(&block, sizeof(struct crypto_kw_block)); - - return ret; -} - -static int crypto_kw_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - struct crypto_kw_block block; - struct scatterlist *src, *dst; - u64 t = 1; - unsigned int i; - - /* - * Require at least 2 semiblocks (note, the 3rd semiblock that is - * required by SP800-38F is the IV that occupies the first semiblock. - * This means that the dst memory must be one semiblock larger than src. - * Also ensure that the given data is aligned to semiblock. - */ - if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) - return -EINVAL; - - /* - * Place the predefined IV into block A -- for encrypt, the caller - * does not need to provide an IV, but he needs to fetch the final IV. - */ - block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL); - - /* - * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, src points to req->src and dst to req->dst. For any - * subsequent round, the code operates on req->dst only. 
- */ - src = req->src; - dst = req->dst; - - for (i = 0; i < 6; i++) { - struct scatter_walk src_walk, dst_walk; - unsigned int nbytes = req->cryptlen; - - scatterwalk_start(&src_walk, src); - scatterwalk_start(&dst_walk, dst); - - while (nbytes) { - /* get the source block */ - scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, - false); - - /* perform KW operation: encrypt block */ - crypto_cipher_encrypt_one(cipher, (u8 *)&block, - (u8 *)&block); - /* perform KW operation: modify IV with counter */ - block.A ^= cpu_to_be64(t); - t++; - - /* Copy block->R into place */ - scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, - true); - - nbytes -= SEMIBSIZE; - } - - /* we now start to operate on the dst SGL only */ - src = req->dst; - dst = req->dst; - } - - /* establish the IV for the caller to pick up */ - memcpy(req->iv, &block.A, SEMIBSIZE); - - memzero_explicit(&block, sizeof(struct crypto_kw_block)); - - return 0; -} - -static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct skcipher_instance *inst; - struct crypto_alg *alg; - int err; - - inst = skcipher_alloc_instance_simple(tmpl, tb); - if (IS_ERR(inst)) - return PTR_ERR(inst); - - alg = skcipher_ialg_simple(inst); - - err = -EINVAL; - /* Section 5.1 requirement for KW */ - if (alg->cra_blocksize != sizeof(struct crypto_kw_block)) - goto out_free_inst; - - inst->alg.base.cra_blocksize = SEMIBSIZE; - inst->alg.base.cra_alignmask = 0; - inst->alg.ivsize = SEMIBSIZE; - - inst->alg.encrypt = crypto_kw_encrypt; - inst->alg.decrypt = crypto_kw_decrypt; - - err = skcipher_register_instance(tmpl, inst); - if (err) { -out_free_inst: - inst->free(inst); - } - - return err; -} - -static struct crypto_template crypto_kw_tmpl = { - .name = "kw", - .create = crypto_kw_create, - .module = THIS_MODULE, -}; - -static int __init crypto_kw_init(void) -{ - return crypto_register_template(&crypto_kw_tmpl); -} - -static void __exit crypto_kw_exit(void) -{ - crypto_unregister_template(&crypto_kw_tmpl); -} - -subsys_initcall(crypto_kw_init); -module_exit(crypto_kw_exit); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); -MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)"); -MODULE_ALIAS_CRYPTO("kw"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/khazad.c b/crypto/khazad.c index 70cafe73f974..024264ee9cd1 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -23,7 +23,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #include <linux/types.h> #define KHAZAD_KEY_SIZE 16 @@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; - /* key is supposed to be 32-bit aligned */ - K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); - K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); + K2 = get_unaligned_be64(&in_key[0]); + K1 = get_unaligned_be64(&in_key[8]); /* setup the encrypt key */ for (r = 0; r <= KHAZAD_ROUNDS; r++) { @@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, } static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], - u8 *ciphertext, const u8 *plaintext) + u8 *dst, const u8 *src) { - const __be64 *src = (const __be64 *)plaintext; - __be64 *dst = (__be64 *)ciphertext; int r; u64 state; - state = be64_to_cpu(*src) ^ 
roundKey[0]; + state = get_unaligned_be64(src) ^ roundKey[0]; for (r = 1; r < KHAZAD_ROUNDS; r++) { state = T0[(int)(state >> 56) ] ^ @@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ roundKey[KHAZAD_ROUNDS]; - *dst = cpu_to_be64(state); + put_unaligned_be64(state, dst); } static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = KHAZAD_BLOCK_SIZE, .cra_ctxsize = sizeof (struct khazad_ctx), - .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = KHAZAD_KEY_SIZE, @@ -876,7 +871,7 @@ static void __exit khazad_mod_fini(void) } -subsys_initcall(khazad_mod_init); +module_init(khazad_mod_init); module_exit(khazad_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/kpp.c b/crypto/kpp.c index 33d44e59387f..2e0cefe7a25f 100644 --- a/crypto/kpp.c +++ b/crypto/kpp.c @@ -66,29 +66,6 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst) kpp->free(kpp); } -static int __maybe_unused crypto_kpp_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - struct kpp_alg *kpp = __crypto_kpp_alg(alg); - struct crypto_istat_kpp *istat; - struct crypto_stat_kpp rkpp; - - istat = kpp_get_stat(kpp); - - memset(&rkpp, 0, sizeof(rkpp)); - - strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); - - rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt); - rkpp.stat_generate_public_key_cnt = - atomic64_read(&istat->generate_public_key_cnt); - rkpp.stat_compute_shared_secret_cnt = - atomic64_read(&istat->compute_shared_secret_cnt); - rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); -} - static const struct crypto_type crypto_kpp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_kpp_init_tfm, @@ -99,13 +76,11 @@ static const struct crypto_type crypto_kpp_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_kpp_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_kpp_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_KPP, .tfmsize = offsetof(struct crypto_kpp, base), + .algsize = offsetof(struct kpp_alg, base), }; struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask) @@ -131,15 +106,11 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp); static void kpp_prepare_alg(struct kpp_alg *alg) { - struct crypto_istat_kpp *istat = kpp_get_stat(alg); struct crypto_alg *base = &alg->base; base->cra_type = &crypto_kpp_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_KPP; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - memset(istat, 0, sizeof(*istat)); } int crypto_register_kpp(struct kpp_alg *alg) diff --git a/crypto/krb5/Kconfig b/crypto/krb5/Kconfig new file mode 100644 index 000000000000..4d0476e13f3c --- /dev/null +++ b/crypto/krb5/Kconfig @@ -0,0 +1,26 @@ +config CRYPTO_KRB5 + tristate "Kerberos 5 crypto" + select CRYPTO_MANAGER + select CRYPTO_KRB5ENC + select CRYPTO_AUTHENC + select CRYPTO_SKCIPHER + select CRYPTO_HASH_INFO + select CRYPTO_HMAC + select CRYPTO_CMAC + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_CBC + select CRYPTO_CTS + select CRYPTO_AES + select CRYPTO_CAMELLIA + help + Provide a library for provision of Kerberos-5-based crypto. This is + intended for network filesystems to use. 
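As a concrete illustration of that intended use, here is a rough sketch of a caller wrapping a blob with the crypto_krb5_* API added later in this patch. The etype number (18, aes256-cts-hmac-sha1-96) and the usage value are illustrative, not mandated by the patch:

/* Sketch only: wrap data_len bytes under transport key TK. */
static ssize_t example_krb5_wrap(const struct krb5_buffer *TK, u32 usage,
				 const void *data, size_t data_len)
{
	const struct krb5_enctype *krb5;
	struct crypto_aead *aead;
	struct scatterlist sg[1];
	size_t offset, buf_len;
	ssize_t secured;
	void *buf;

	krb5 = crypto_krb5_find_enctype(18); /* aes256-cts-hmac-sha1-96 */
	if (!krb5)
		return -ENOPKG;

	aead = crypto_krb5_prepare_encryption(krb5, TK, usage, GFP_KERNEL);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	/* Size the buffer for confounder + data + checksum. */
	buf_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
					      data_len, &offset);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		crypto_free_aead(aead);
		return -ENOMEM;
	}

	memcpy(buf + offset, data, data_len);
	sg_init_one(sg, buf, buf_len);

	/* The confounder is generated for us (preconfounded == false). */
	secured = crypto_krb5_encrypt(krb5, aead, sg, 1, buf_len,
				      offset, data_len, false);

	/* ... transmit buf[0..secured) if secured >= 0 ... */
	kfree_sensitive(buf);
	crypto_free_aead(aead);
	return secured;
}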
+
+config CRYPTO_KRB5_SELFTESTS
+	bool "Kerberos 5 crypto selftests"
+	depends on CRYPTO_KRB5
+	help
+	  Turn on some self-testing for the Kerberos 5 crypto functions. These
+	  will be performed on module load or boot, if compiled in.
diff --git a/crypto/krb5/Makefile b/crypto/krb5/Makefile
new file mode 100644
index 000000000000..d38890c0b247
--- /dev/null
+++ b/crypto/krb5/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Kerberos 5 crypto library
+#
+
+krb5-y += \
+	krb5_kdf.o \
+	krb5_api.o \
+	rfc3961_simplified.o \
+	rfc3962_aes.o \
+	rfc6803_camellia.o \
+	rfc8009_aes2.o
+
+krb5-$(CONFIG_CRYPTO_KRB5_SELFTESTS) += \
+	selftest.o \
+	selftest_data.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5.o
diff --git a/crypto/krb5/internal.h b/crypto/krb5/internal.h
new file mode 100644
index 000000000000..a59084ffafe8
--- /dev/null
+++ b/crypto/krb5/internal.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos5 crypto internals
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/krb5.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+
+/*
+ * Profile used for key derivation and encryption.
+ */
+struct krb5_crypto_profile {
+	/* Pseudo-random function */
+	int (*calc_PRF)(const struct krb5_enctype *krb5,
+			const struct krb5_buffer *protocol_key,
+			const struct krb5_buffer *octet_string,
+			struct krb5_buffer *result,
+			gfp_t gfp);
+
+	/* Checksum key derivation */
+	int (*calc_Kc)(const struct krb5_enctype *krb5,
+		       const struct krb5_buffer *TK,
+		       const struct krb5_buffer *usage_constant,
+		       struct krb5_buffer *Kc,
+		       gfp_t gfp);
+
+	/* Encryption key derivation */
+	int (*calc_Ke)(const struct krb5_enctype *krb5,
+		       const struct krb5_buffer *TK,
+		       const struct krb5_buffer *usage_constant,
+		       struct krb5_buffer *Ke,
+		       gfp_t gfp);
+
+	/* Integrity key derivation */
+	int (*calc_Ki)(const struct krb5_enctype *krb5,
+		       const struct krb5_buffer *TK,
+		       const struct krb5_buffer *usage_constant,
+		       struct krb5_buffer *Ki,
+		       gfp_t gfp);
+
+	/* Derive the keys needed for an encryption AEAD object. */
+	int (*derive_encrypt_keys)(const struct krb5_enctype *krb5,
+				   const struct krb5_buffer *TK,
+				   unsigned int usage,
+				   struct krb5_buffer *setkey,
+				   gfp_t gfp);
+
+	/* Directly load the keys needed for an encryption AEAD object. */
+	int (*load_encrypt_keys)(const struct krb5_enctype *krb5,
+				 const struct krb5_buffer *Ke,
+				 const struct krb5_buffer *Ki,
+				 struct krb5_buffer *setkey,
+				 gfp_t gfp);
+
+	/* Derive the key needed for a checksum hash object. */
+	int (*derive_checksum_key)(const struct krb5_enctype *krb5,
+				   const struct krb5_buffer *TK,
+				   unsigned int usage,
+				   struct krb5_buffer *setkey,
+				   gfp_t gfp);
+
+	/* Directly load the keys needed for a checksum hash object. */
+	int (*load_checksum_key)(const struct krb5_enctype *krb5,
+				 const struct krb5_buffer *Kc,
+				 struct krb5_buffer *setkey,
+				 gfp_t gfp);
+
+	/* Encrypt data in-place, inserting confounder and checksum.
*/ + ssize_t (*encrypt)(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); + + /* Decrypt data in-place, removing confounder and checksum */ + int (*decrypt)(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + + /* Generate a MIC on part of a packet, inserting the checksum */ + ssize_t (*get_mic)(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); + + /* Verify the MIC on a piece of data, removing the checksum */ + int (*verify_mic)(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +}; + +/* + * Crypto size/alignment rounding convenience macros. + */ +#define crypto_roundup(X) ((unsigned int)round_up((X), CRYPTO_MINALIGN)) + +#define krb5_aead_size(TFM) \ + crypto_roundup(sizeof(struct aead_request) + crypto_aead_reqsize(TFM)) +#define krb5_aead_ivsize(TFM) \ + crypto_roundup(crypto_aead_ivsize(TFM)) +#define krb5_shash_size(TFM) \ + crypto_roundup(sizeof(struct shash_desc) + crypto_shash_descsize(TFM)) +#define krb5_digest_size(TFM) \ + crypto_roundup(crypto_shash_digestsize(TFM)) +#define round16(x) (((x) + 15) & ~15) + +/* + * Self-testing data. + */ +struct krb5_prf_test { + u32 etype; + const char *name, *key, *octet, *prf; +}; + +struct krb5_key_test_one { + u32 use; + const char *key; +}; + +struct krb5_key_test { + u32 etype; + const char *name, *key; + struct krb5_key_test_one Kc, Ke, Ki; +}; + +struct krb5_enc_test { + u32 etype; + u32 usage; + const char *name, *plain, *conf, *K0, *Ke, *Ki, *ct; +}; + +struct krb5_mic_test { + u32 etype; + u32 usage; + const char *name, *plain, *K0, *Kc, *mic; +}; + +/* + * krb5_api.c + */ +struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *keys, + gfp_t gfp); +struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + gfp_t gfp); + +/* + * krb5_kdf.c + */ +int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); +int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); +int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); + +/* + * rfc3961_simplified.c + */ +extern const struct krb5_crypto_profile rfc3961_simplified_profile; + +int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg, + size_t offset, size_t len); +int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); +int authenc_load_encrypt_keys(const struct krb5_enctype *krb5, + const struct krb5_buffer *Ke, + const struct krb5_buffer *Ki, + struct krb5_buffer *setkey, + gfp_t gfp); +int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); +int rfc3961_load_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + 
struct krb5_buffer *setkey, + gfp_t gfp); +ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); +int krb5_aead_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len); +int rfc3961_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +/* + * rfc3962_aes.c + */ +extern const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96; +extern const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96; + +/* + * rfc6803_camellia.c + */ +extern const struct krb5_enctype krb5_camellia128_cts_cmac; +extern const struct krb5_enctype krb5_camellia256_cts_cmac; + +/* + * rfc8009_aes2.c + */ +extern const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128; +extern const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192; + +/* + * selftest.c + */ +#ifdef CONFIG_CRYPTO_KRB5_SELFTESTS +int krb5_selftest(void); +#else +static inline int krb5_selftest(void) { return 0; } +#endif + +/* + * selftest_data.c + */ +extern const struct krb5_prf_test krb5_prf_tests[]; +extern const struct krb5_key_test krb5_key_tests[]; +extern const struct krb5_enc_test krb5_enc_tests[]; +extern const struct krb5_mic_test krb5_mic_tests[]; diff --git a/crypto/krb5/krb5_api.c b/crypto/krb5/krb5_api.c new file mode 100644 index 000000000000..23026d4206c8 --- /dev/null +++ b/crypto/krb5/krb5_api.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos 5 crypto library. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include "internal.h" + +MODULE_DESCRIPTION("Kerberos 5 crypto"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + +static const struct krb5_enctype *const krb5_supported_enctypes[] = { + &krb5_aes128_cts_hmac_sha1_96, + &krb5_aes256_cts_hmac_sha1_96, + &krb5_aes128_cts_hmac_sha256_128, + &krb5_aes256_cts_hmac_sha384_192, + &krb5_camellia128_cts_cmac, + &krb5_camellia256_cts_cmac, +}; + +/** + * crypto_krb5_find_enctype - Find the handler for a Kerberos5 encryption type + * @enctype: The standard Kerberos encryption type number + * + * Look up a Kerberos encryption type by number. If successful, returns a + * pointer to the type tables; returns NULL otherwise. + */ +const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype) +{ + const struct krb5_enctype *krb5; + size_t i; + + for (i = 0; i < ARRAY_SIZE(krb5_supported_enctypes); i++) { + krb5 = krb5_supported_enctypes[i]; + if (krb5->etype == enctype) + return krb5; + } + + return NULL; +} +EXPORT_SYMBOL(crypto_krb5_find_enctype); + +/** + * crypto_krb5_how_much_buffer - Work out how much buffer is required for an amount of data + * @krb5: The encoding to use. 
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @data_size: How much data we want to allow for
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much buffer space is required to wrap a given amount of data.
+ * This allows for a confounder, padding and checksum as appropriate. The
+ * amount of buffer required is returned and the offset into the buffer at
+ * which the data will start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+				   enum krb5_crypto_mode mode,
+				   size_t data_size, size_t *_offset)
+{
+	switch (mode) {
+	case KRB5_CHECKSUM_MODE:
+		*_offset = krb5->cksum_len;
+		return krb5->cksum_len + data_size;
+
+	case KRB5_ENCRYPT_MODE:
+		*_offset = krb5->conf_len;
+		return krb5->conf_len + data_size + krb5->cksum_len;
+
+	default:
+		WARN_ON(1);
+		*_offset = 0;
+		return 0;
+	}
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_buffer);
+
+/**
+ * crypto_krb5_how_much_data - Work out how much data can fit in an amount of buffer
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @_buffer_size: How much buffer we want to allow for (may be reduced)
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much data can be fitted into a given amount of buffer. This
+ * allows for a confounder, padding and checksum as appropriate. The amount of
+ * data that will fit is returned, the amount of buffer required is shrunk to
+ * allow for alignment and the offset into the buffer at which the data will
+ * start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+				 enum krb5_crypto_mode mode,
+				 size_t *_buffer_size, size_t *_offset)
+{
+	size_t buffer_size = *_buffer_size, data_size;
+
+	switch (mode) {
+	case KRB5_CHECKSUM_MODE:
+		if (WARN_ON(buffer_size < krb5->cksum_len + 1))
+			goto bad;
+		*_offset = krb5->cksum_len;
+		return buffer_size - krb5->cksum_len;
+
+	case KRB5_ENCRYPT_MODE:
+		if (WARN_ON(buffer_size < krb5->conf_len + 1 + krb5->cksum_len))
+			goto bad;
+		data_size = buffer_size - krb5->cksum_len;
+		*_offset = krb5->conf_len;
+		return data_size - krb5->conf_len;
+
+	default:
+		WARN_ON(1);
+		goto bad;
+	}
+
+bad:
+	*_offset = 0;
+	return 0;
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_data);
+
+/**
+ * crypto_krb5_where_is_the_data - Find the data in a decrypted message
+ * @krb5: The encoding to use.
+ * @mode: Mode of operation
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Find the offset and size of the data in a secure message so that this
+ * information can be used in the metadata buffer which will get added to the
+ * digest by crypto_krb5_verify_mic().
+ */
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+				   enum krb5_crypto_mode mode,
+				   size_t *_offset, size_t *_len)
+{
+	switch (mode) {
+	case KRB5_CHECKSUM_MODE:
+		*_offset += krb5->cksum_len;
+		*_len -= krb5->cksum_len;
+		return;
+	case KRB5_ENCRYPT_MODE:
+		*_offset += krb5->conf_len;
+		*_len -= krb5->conf_len + krb5->cksum_len;
+		return;
+	default:
+		WARN_ON_ONCE(1);
+		return;
+	}
+}
+EXPORT_SYMBOL(crypto_krb5_where_is_the_data);
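A worked example of how the three sizing helpers above fit together, using illustrative values (an enctype with conf_len of 16 and cksum_len of 12):

/*
 * Worked sizing example (illustrative values: conf_len == 16,
 * cksum_len == 12):
 *
 *	size_t offset, len;
 *
 *	crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE, 100, &offset);
 *		==> returns 16 + 100 + 12 == 128, offset == 16
 *
 *	len = 128;
 *	crypto_krb5_how_much_data(krb5, KRB5_ENCRYPT_MODE, &len, &offset);
 *		==> returns (128 - 12) - 16 == 100, offset == 16
 *
 *	offset = 0; len = 128;
 *	crypto_krb5_where_is_the_data(krb5, KRB5_ENCRYPT_MODE, &offset, &len);
 *		==> offset becomes 16, len becomes 128 - (16 + 12) == 100
 */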
+
+/*
+ * Prepare the encryption with derived key data.
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+					    const struct krb5_buffer *keys,
+					    gfp_t gfp)
+{
+	struct crypto_aead *ci = NULL;
+	int ret = -ENOMEM;
+
+	ci = crypto_alloc_aead(krb5->encrypt_name, 0, 0);
+	if (IS_ERR(ci)) {
+		ret = PTR_ERR(ci);
+		if (ret == -ENOENT)
+			ret = -ENOPKG;
+		goto err;
+	}
+
+	ret = crypto_aead_setkey(ci, keys->data, keys->len);
+	if (ret < 0) {
+		pr_err("Couldn't set AEAD key %s: %d\n", krb5->encrypt_name, ret);
+		goto err_ci;
+	}
+
+	ret = crypto_aead_setauthsize(ci, krb5->cksum_len);
+	if (ret < 0) {
+		pr_err("Couldn't set AEAD authsize %s: %d\n", krb5->encrypt_name, ret);
+		goto err_ci;
+	}
+
+	return ci;
+err_ci:
+	crypto_free_aead(ci);
+err:
+	return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_encryption - Prepare AEAD crypto object for encryption-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it and set
+ * its parameters and return the crypto handle to it. This can then be used to
+ * dispatch encrypt and decrypt operations.
+ */
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+						   const struct krb5_buffer *TK,
+						   u32 usage, gfp_t gfp)
+{
+	struct crypto_aead *ci = NULL;
+	struct krb5_buffer keys = {};
+	int ret;
+
+	ret = krb5->profile->derive_encrypt_keys(krb5, TK, usage, &keys, gfp);
+	if (ret < 0)
+		goto err;
+
+	ci = krb5_prepare_encryption(krb5, &keys, gfp);
+	if (IS_ERR(ci)) {
+		ret = PTR_ERR(ci);
+		goto err;
+	}
+
+	kfree(keys.data);
+	return ci;
+err:
+	kfree(keys.data);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_encryption);
+
+/*
+ * Prepare the checksum with derived key data.
+ */
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+					   const struct krb5_buffer *Kc,
+					   gfp_t gfp)
+{
+	struct crypto_shash *ci = NULL;
+	int ret = -ENOMEM;
+
+	ci = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+	if (IS_ERR(ci)) {
+		ret = PTR_ERR(ci);
+		if (ret == -ENOENT)
+			ret = -ENOPKG;
+		goto err;
+	}
+
+	ret = crypto_shash_setkey(ci, Kc->data, Kc->len);
+	if (ret < 0) {
+		pr_err("Couldn't set shash key %s: %d\n", krb5->cksum_name, ret);
+		goto err_ci;
+	}
+
+	return ci;
+err_ci:
+	crypto_free_shash(ci);
+err:
+	return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_checksum - Prepare hash crypto object for checksum-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it and set
+ * its parameters and return the crypto handle to it. This can then be used to
+ * dispatch get_mic and verify_mic operations.
+ */
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+						  const struct krb5_buffer *TK,
+						  u32 usage, gfp_t gfp)
+{
+	struct crypto_shash *ci = NULL;
+	struct krb5_buffer keys = {};
+	int ret;
+
+	ret = krb5->profile->derive_checksum_key(krb5, TK, usage, &keys, gfp);
+	if (ret < 0) {
+		pr_err("get_Kc failed %d\n", ret);
+		goto err;
+	}
+
+	ci = krb5_prepare_checksum(krb5, &keys, gfp);
+	if (IS_ERR(ci)) {
+		ret = PTR_ERR(ci);
+		goto err;
+	}
+
+	kfree(keys.data);
+	return ci;
+err:
+	kfree(keys.data);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_checksum);
+
+/**
+ * crypto_krb5_encrypt - Apply Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ * @preconfounded: True if the confounder is already inserted.
+ *
+ * Using the specified Kerberos encoding, insert a confounder and padding as
+ * needed, encrypt this and the data in place and insert an integrity checksum
+ * into the buffer.
+ *
+ * The buffer must include space for the confounder, the checksum and any
+ * padding required. The caller can preinsert the confounder into the buffer
+ * (for testing, for example).
+ *
+ * The resulting secured blob may be smaller than the size of the buffer.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the confounder
+ * is too short or the data is misaligned. Other errors may also be returned
+ * from the crypto layer.
+ */
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+			    struct crypto_aead *aead,
+			    struct scatterlist *sg, unsigned int nr_sg,
+			    size_t sg_len,
+			    size_t data_offset, size_t data_len,
+			    bool preconfounded)
+{
+	if (WARN_ON(data_offset > sg_len ||
+		    data_len > sg_len ||
+		    data_offset > sg_len - data_len))
+		return -EMSGSIZE;
+	return krb5->profile->encrypt(krb5, aead, sg, nr_sg, sg_len,
+				      data_offset, data_len, preconfounded);
+}
+EXPORT_SYMBOL(crypto_krb5_encrypt);
+
+/**
+ * crypto_krb5_decrypt - Validate and remove Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum and decrypt the secure region, stripping off the confounder.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data plus the trailing padding are stored. The caller is responsible
+ * for working out how much padding there is and removing it.
+ *
+ * Returns 0 if successful, -ENOMEM on an allocation failure, -EPROTO if the
+ * data cannot be parsed, or -EBADMSG if the integrity checksum doesn't match.
+ * Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+			struct crypto_aead *aead,
+			struct scatterlist *sg, unsigned int nr_sg,
+			size_t *_offset, size_t *_len)
+{
+	return krb5->profile->decrypt(krb5, aead, sg, nr_sg, _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_decrypt);
+
+/**
+ * crypto_krb5_get_mic - Apply Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ *
+ * Using the specified Kerberos encoding, calculate and insert an integrity
+ * checksum into the buffer.
+ *
+ * The buffer must include space for the checksum at the front.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the gap for
+ * the checksum is too short. Other errors may also be returned from the
+ * crypto layer.
+ */
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+			    struct crypto_shash *shash,
+			    const struct krb5_buffer *metadata,
+			    struct scatterlist *sg, unsigned int nr_sg,
+			    size_t sg_len,
+			    size_t data_offset, size_t data_len)
+{
+	if (WARN_ON(data_offset > sg_len ||
+		    data_len > sg_len ||
+		    data_offset > sg_len - data_len))
+		return -EMSGSIZE;
+	return krb5->profile->get_mic(krb5, shash, metadata, sg, nr_sg, sg_len,
+				      data_offset, data_len);
+}
+EXPORT_SYMBOL(crypto_krb5_get_mic);
+
+/**
+ * crypto_krb5_verify_mic - Validate and remove Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data is stored.
+ *
+ * Returns 0 if successful, -ENOMEM on an allocation failure, -EPROTO if the
+ * data cannot be parsed, or -EBADMSG if the checksum doesn't match. Other
+ * errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+			   struct crypto_shash *shash,
+			   const struct krb5_buffer *metadata,
+			   struct scatterlist *sg, unsigned int nr_sg,
+			   size_t *_offset, size_t *_len)
+{
+	return krb5->profile->verify_mic(krb5, shash, metadata, sg, nr_sg,
+					 _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_verify_mic);
+
+static int __init crypto_krb5_init(void)
+{
+	return krb5_selftest();
+}
+module_init(crypto_krb5_init);
+
+static void __exit crypto_krb5_exit(void)
+{
+}
+module_exit(crypto_krb5_exit);
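To complete the picture, a rough sketch of MIC generation with the checksum-mode helpers above. The usage value is illustrative, and the metadata argument is passed as NULL here on the assumption that no extra metadata needs mixing in; callers may instead supply a krb5_buffer:

/* Sketch only: prepend a MIC to a len-byte message under key TK. */
static ssize_t example_krb5_mic(const struct krb5_enctype *krb5,
				const struct krb5_buffer *TK, u32 usage,
				const void *msg, size_t len)
{
	struct crypto_shash *shash;
	struct scatterlist sg[1];
	size_t offset, buf_len;
	ssize_t ret;
	void *buf;

	shash = crypto_krb5_prepare_checksum(krb5, TK, usage, GFP_KERNEL);
	if (IS_ERR(shash))
		return PTR_ERR(shash);

	/* Checksum-mode: the MIC occupies the first cksum_len bytes. */
	buf_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE,
					      len, &offset);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		crypto_free_shash(shash);
		return -ENOMEM;
	}
	memcpy(buf + offset, msg, len);
	sg_init_one(sg, buf, buf_len);

	ret = crypto_krb5_get_mic(krb5, shash, NULL, sg, 1, buf_len,
				  offset, len);

	/* A receiver would run crypto_krb5_verify_mic() on the same layout
	 * and get back the data offset/length on success. */
	kfree_sensitive(buf);
	crypto_free_shash(shash);
	return ret;
}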
diff --git a/crypto/krb5/krb5_kdf.c b/crypto/krb5/krb5_kdf.c
new file mode 100644
index 000000000000..6699e5469d1b
--- /dev/null
+++ b/crypto/krb5/krb5_kdf.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos key derivation.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/**
+ * crypto_krb5_calc_PRFplus - Calculate PRF+ [RFC4402]
+ * @krb5: The encryption type to use
+ * @K: The protocol key for the pseudo-random function
+ * @L: The length of the output
+ * @S: The input octet string
+ * @result: Result buffer, sized to krb5->prf_len
+ * @gfp: Allocation restrictions
+ *
+ * Calculate the Kerberos pseudo-random function, PRF+(), by the following
+ * method:
+ *
+ *      PRF+(K, L, S) = truncate(L, T1 || T2 || .. || Tn)
+ *      Tn = PRF(K, n || S)
+ *      [rfc4402 sec 2]
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+			     const struct krb5_buffer *K,
+			     unsigned int L,
+			     const struct krb5_buffer *S,
+			     struct krb5_buffer *result,
+			     gfp_t gfp)
+{
+	struct krb5_buffer T_series, Tn, n_S;
+	void *buffer;
+	int ret, n = 1;
+
+	Tn.len = krb5->prf_len;
+	T_series.len = 0;
+	n_S.len = 4 + S->len;
+
+	buffer = kzalloc(round16(L + Tn.len) + round16(n_S.len), gfp);
+	if (!buffer)
+		return -ENOMEM;
+
+	T_series.data = buffer;
+	n_S.data = buffer + round16(L + Tn.len);
+	memcpy(n_S.data + 4, S->data, S->len);
+
+	while (T_series.len < L) {
+		*(__be32 *)(n_S.data) = htonl(n);
+		Tn.data = T_series.data + Tn.len * (n - 1);
+		ret = krb5->profile->calc_PRF(krb5, K, &n_S, &Tn, gfp);
+		if (ret < 0)
+			goto err;
+		T_series.len += Tn.len;
+		n++;
+	}
+
+	/* Truncate to L */
+	memcpy(result->data, T_series.data, L);
+	ret = 0;
+
+err:
+	kfree_sensitive(buffer);
+	return ret;
+}
+EXPORT_SYMBOL(crypto_krb5_calc_PRFplus);
+
+/**
+ * krb5_derive_Kc - Derive the Kerberos Kc checksumming key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Kc checksumming key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+		   u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+	u8 buf[5] __aligned(CRYPTO_MINALIGN);
+	struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+	*(__be32 *)buf = cpu_to_be32(usage);
+	buf[4] = KEY_USAGE_SEED_CHECKSUM;
+
+	key->len = krb5->Kc_len;
+	return krb5->profile->calc_Kc(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ke - Derive the Kerberos Ke encryption key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ke encryption key. The key is stored into the prepared
+ * buffer.
+ */
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+		   u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+	u8 buf[5] __aligned(CRYPTO_MINALIGN);
+	struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+	*(__be32 *)buf = cpu_to_be32(usage);
+	buf[4] = KEY_USAGE_SEED_ENCRYPTION;
+
+	key->len = krb5->Ke_len;
+	return krb5->profile->calc_Ke(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ki - Derive the Kerberos Ki integrity checksum key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ki integrity checksum key. The key is stored into the
+ * prepared buffer.
+ */ +int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_INTEGRITY; + + key->len = krb5->Ki_len; + return krb5->profile->calc_Ki(krb5, TK, &usage_constant, key, gfp); +} diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c new file mode 100644 index 000000000000..e49cbdec7c40 --- /dev/null +++ b/crypto/krb5/rfc3961_simplified.c @@ -0,0 +1,793 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* rfc3961 Kerberos 5 simplified crypto profile. + * + * Parts borrowed from net/sunrpc/auth_gss/. + */ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/lcm.h> +#include <linux/rtnetlink.h> +#include <crypto/authenc.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +/* Maximum blocksize for the supported crypto algorithms */ +#define KRB5_MAX_BLOCKSIZE (16) + +int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg, + size_t offset, size_t len) +{ + struct sg_mapping_iter miter; + size_t i, n; + int ret = 0; + + sg_miter_start(&miter, sg, sg_nents(sg), + SG_MITER_FROM_SG | SG_MITER_LOCAL); + sg_miter_skip(&miter, offset); + for (i = 0; i < len; i += n) { + sg_miter_next(&miter); + n = min(miter.length, len - i); + ret = crypto_shash_update(desc, miter.addr, n); + if (ret < 0) + break; + } + sg_miter_stop(&miter); + return ret; +} + +static int rfc3961_do_encrypt(struct crypto_sync_skcipher *tfm, void *iv, + const struct krb5_buffer *in, struct krb5_buffer *out) +{ + struct scatterlist sg[1]; + u8 local_iv[KRB5_MAX_BLOCKSIZE] __aligned(KRB5_MAX_BLOCKSIZE) = {0}; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); + int ret; + + if (WARN_ON(in->len != out->len)) + return -EINVAL; + if (out->len % crypto_sync_skcipher_blocksize(tfm) != 0) + return -EINVAL; + + if (crypto_sync_skcipher_ivsize(tfm) > KRB5_MAX_BLOCKSIZE) + return -EINVAL; + + if (iv) + memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm)); + + memcpy(out->data, in->data, out->len); + sg_init_one(sg, out->data, out->len); + + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg, sg, out->len, local_iv); + + ret = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); + return ret; +} + +/* + * Calculate an unkeyed basic hash. + */ +static int rfc3961_calc_H(const struct krb5_enctype *krb5, + const struct krb5_buffer *data, + struct krb5_buffer *digest, + gfp_t gfp) +{ + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t desc_size; + int ret = -ENOMEM; + + tfm = crypto_alloc_shash(krb5->hash_name, 0, 0); + if (IS_ERR(tfm)) + return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + + desc = kzalloc(desc_size, gfp); + if (!desc) + goto error_tfm; + + digest->len = crypto_shash_digestsize(tfm); + digest->data = kzalloc(digest->len, gfp); + if (!digest->data) + goto error_desc; + + desc->tfm = tfm; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error_digest; + + ret = crypto_shash_finup(desc, data->data, data->len, digest->data); + if (ret < 0) + goto error_digest; + + goto error_desc; + +error_digest: + kfree_sensitive(digest->data); +error_desc: + kfree_sensitive(desc); +error_tfm: + crypto_free_shash(tfm); + return ret; +} + +/* + * This is the n-fold function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. 
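+ *
+ * In brief: the input is conceptually replicated out to lcm(inlen, outlen)
+ * octets, each copy rotated right by 13 bits more than the previous one, and
+ * the copies are summed into the output with end-around carry.  One sanity
+ * check that follows from the code below: if the input already has the
+ * output length, lcm(n, n) == n, the single copy is unrotated, and n-fold is
+ * the identity (e.g. the 64-bit n-fold of "kerberos" is just "kerberos").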
+ */ +static void rfc3961_nfold(const struct krb5_buffer *source, struct krb5_buffer *result) +{ + const u8 *in = source->data; + u8 *out = result->data; + unsigned long ulcm; + unsigned int inbits, outbits; + int byte, i, msbit; + + /* the code below is more readable if I make these bytes instead of bits */ + inbits = source->len; + outbits = result->len; + + /* first compute lcm(n,k) */ + ulcm = lcm(inbits, outbits); + + /* now do the real work */ + memset(out, 0, outbits); + byte = 0; + + /* this will end up cycling through k lcm(k,n)/k times, which + * is correct. + */ + for (i = ulcm-1; i >= 0; i--) { + /* compute the msbit in k which gets added into this byte */ + msbit = ( + /* first, start with the msbit in the first, + * unrotated byte + */ + ((inbits << 3) - 1) + + /* then, for each byte, shift to the right + * for each repetition + */ + (((inbits << 3) + 13) * (i/inbits)) + + /* last, pick out the correct byte within + * that shifted repetition + */ + ((inbits - (i % inbits)) << 3) + ) % (inbits << 3); + + /* pull out the byte value itself */ + byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) | + (in[((inbits) - (msbit >> 3)) % inbits])) + >> ((msbit & 7) + 1)) & 0xff; + + /* do the addition */ + byte += out[i % outbits]; + out[i % outbits] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + + /* if there's a carry bit left over, add it back in */ + if (byte) { + for (i = outbits - 1; i >= 0; i--) { + /* do the addition */ + byte += out[i]; + out[i] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + } +} + +/* + * Calculate a derived key, DK(Base Key, Well-Known Constant) + * + * DK(Key, Constant) = random-to-key(DR(Key, Constant)) + * DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)) + * K1 = E(Key, n-fold(Constant), initial-cipher-state) + * K2 = E(Key, K1, initial-cipher-state) + * K3 = E(Key, K2, initial-cipher-state) + * K4 = ... + * DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...) + * [rfc3961 sec 5.1] + */ +static int rfc3961_calc_DK(const struct krb5_enctype *krb5, + const struct krb5_buffer *inkey, + const struct krb5_buffer *in_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + unsigned int blocksize, keybytes, keylength, n; + struct krb5_buffer inblock, outblock, rawkey; + struct crypto_sync_skcipher *cipher; + int ret = -EINVAL; + + blocksize = krb5->block_len; + keybytes = krb5->key_bytes; + keylength = krb5->key_len; + + if (inkey->len != keylength || result->len != keylength) + return -EINVAL; + if (!krb5->random_to_key && result->len != keybytes) + return -EINVAL; + + cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0); + if (IS_ERR(cipher)) { + ret = (PTR_ERR(cipher) == -ENOENT) ? 
-ENOPKG : PTR_ERR(cipher); + goto err_return; + } + ret = crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len); + if (ret < 0) + goto err_free_cipher; + + ret = -ENOMEM; + inblock.data = kzalloc(blocksize * 2 + keybytes, gfp); + if (!inblock.data) + goto err_free_cipher; + + inblock.len = blocksize; + outblock.data = inblock.data + blocksize; + outblock.len = blocksize; + rawkey.data = outblock.data + blocksize; + rawkey.len = keybytes; + + /* initialize the input block */ + + if (in_constant->len == inblock.len) + memcpy(inblock.data, in_constant->data, inblock.len); + else + rfc3961_nfold(in_constant, &inblock); + + /* loop encrypting the blocks until enough key bytes are generated */ + n = 0; + while (n < rawkey.len) { + rfc3961_do_encrypt(cipher, NULL, &inblock, &outblock); + + if (keybytes - n <= outblock.len) { + memcpy(rawkey.data + n, outblock.data, keybytes - n); + break; + } + + memcpy(rawkey.data + n, outblock.data, outblock.len); + memcpy(inblock.data, outblock.data, outblock.len); + n += outblock.len; + } + + /* postprocess the key */ + if (!krb5->random_to_key) { + /* Identity random-to-key function. */ + memcpy(result->data, rawkey.data, rawkey.len); + ret = 0; + } else { + ret = krb5->random_to_key(krb5, &rawkey, result); + } + + kfree_sensitive(inblock.data); +err_free_cipher: + crypto_free_sync_skcipher(cipher); +err_return: + return ret; +} + +/* + * Calculate single encryption, E() + * + * E(Key, octets) + */ +static int rfc3961_calc_E(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *in_data, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_sync_skcipher *cipher; + int ret; + + cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0); + if (IS_ERR(cipher)) { + ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher); + goto err; + } + + ret = crypto_sync_skcipher_setkey(cipher, key->data, key->len); + if (ret < 0) + goto err_free; + + ret = rfc3961_do_encrypt(cipher, NULL, in_data, result); + +err_free: + crypto_free_sync_skcipher(cipher); +err: + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * tmp1 = H(octet-string) + * tmp2 = truncate tmp1 to multiple of m + * PRF = E(DK(protocol-key, prfconstant), tmp2, initial-cipher-state) + * + * The "prfconstant" used in the PRF operation is the three-octet string + * "prf". 
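+ *
+ * For instance, with the rfc3962 AES enctypes (SHA-1, so hash_len is 20 and
+ * block_len is 16), tmp2 is the first 16 octets of the hash, and the PRF
+ * output is its encryption under DK(protocol-key, "prf"), 16 octets long.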
+ * [rfc3961 sec 5.3]
+ */
+static int rfc3961_calc_PRF(const struct krb5_enctype *krb5,
+			    const struct krb5_buffer *protocol_key,
+			    const struct krb5_buffer *octet_string,
+			    struct krb5_buffer *result,
+			    gfp_t gfp)
+{
+	static const struct krb5_buffer prfconstant = { 3, "prf" };
+	struct krb5_buffer derived_key;
+	struct krb5_buffer tmp1, tmp2;
+	unsigned int m = krb5->block_len;
+	void *buffer;
+	int ret;
+
+	if (result->len != krb5->prf_len)
+		return -EINVAL;
+
+	tmp1.len = krb5->hash_len;
+	derived_key.len = krb5->key_bytes;
+	buffer = kzalloc(round16(tmp1.len) + round16(derived_key.len), gfp);
+	if (!buffer)
+		return -ENOMEM;
+
+	tmp1.data = buffer;
+	derived_key.data = buffer + round16(tmp1.len);
+
+	ret = rfc3961_calc_H(krb5, octet_string, &tmp1, gfp);
+	if (ret < 0)
+		goto err;
+
+	tmp2.len = tmp1.len & ~(m - 1);
+	tmp2.data = tmp1.data;
+
+	ret = rfc3961_calc_DK(krb5, protocol_key, &prfconstant, &derived_key, gfp);
+	if (ret < 0)
+		goto err;
+
+	ret = rfc3961_calc_E(krb5, &derived_key, &tmp2, result, gfp);
+
+err:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+/*
+ * Derive the Ke and Ki keys and package them into a key parameter that can be
+ * given to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+				const struct krb5_buffer *TK,
+				unsigned int usage,
+				struct krb5_buffer *setkey,
+				gfp_t gfp)
+{
+	struct crypto_authenc_key_param *param;
+	struct krb5_buffer Ke, Ki;
+	struct rtattr *rta;
+	int ret;
+
+	Ke.len = krb5->Ke_len;
+	Ki.len = krb5->Ki_len;
+	setkey->len = RTA_LENGTH(sizeof(*param)) + Ke.len + Ki.len;
+	setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+	if (!setkey->data)
+		return -ENOMEM;
+
+	rta = setkey->data;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = htonl(Ke.len);
+
+	Ki.data = (void *)(param + 1);
+	Ke.data = Ki.data + Ki.len;
+
+	ret = krb5_derive_Ke(krb5, TK, usage, &Ke, gfp);
+	if (ret < 0) {
+		pr_err("get_Ke failed %d\n", ret);
+		return ret;
+	}
+	ret = krb5_derive_Ki(krb5, TK, usage, &Ki, gfp);
+	if (ret < 0)
+		pr_err("get_Ki failed %d\n", ret);
+	return ret;
+}
+
+/*
+ * Package predefined Ke and Ki keys into a key parameter that can be given
+ * to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+			      const struct krb5_buffer *Ke,
+			      const struct krb5_buffer *Ki,
+			      struct krb5_buffer *setkey,
+			      gfp_t gfp)
+{
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+
+	setkey->len = RTA_LENGTH(sizeof(*param)) + Ke->len + Ki->len;
+	setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+	if (!setkey->data)
+		return -ENOMEM;
+
+	rta = setkey->data;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = htonl(Ke->len);
+	memcpy((void *)(param + 1), Ki->data, Ki->len);
+	memcpy((void *)(param + 1) + Ki->len, Ke->data, Ke->len);
+	return 0;
+}
+
+/*
+ * Derive the Kc key for checksum-only mode and package it into a key parameter
+ * that can be given to the setkey of a hash crypto object.
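+ *
+ * A minimal usage sketch (standard kernel shash calls; error handling and
+ * the tfm allocation are elided):
+ *
+ *	struct krb5_buffer setkey;
+ *
+ *	rfc3961_derive_checksum_key(krb5, TK, usage, &setkey, GFP_KERNEL);
+ *	crypto_shash_setkey(shash, setkey.data, setkey.len);
+ *	kfree_sensitive(setkey.data);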
+ */ +int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp) +{ + int ret; + + setkey->len = krb5->Kc_len; + setkey->data = kzalloc(setkey->len, GFP_KERNEL); + if (!setkey->data) + return -ENOMEM; + + ret = krb5_derive_Kc(krb5, TK, usage, setkey, gfp); + if (ret < 0) + pr_err("get_Kc failed %d\n", ret); + return ret; +} + +/* + * Package a predefined Kc key for checksum-only mode into a key parameter that + * can be given to the setkey of a hash crypto object. + */ +int rfc3961_load_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + struct krb5_buffer *setkey, + gfp_t gfp) +{ + setkey->len = krb5->Kc_len; + setkey->data = kmemdup(Kc->data, Kc->len, GFP_KERNEL); + if (!setkey->data) + return -ENOMEM; + return 0; +} + +/* + * Apply encryption and checksumming functions to part of a scatterlist. + */ +ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + struct aead_request *req; + ssize_t ret, done; + size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset; + void *buffer; + u8 *iv; + + if (WARN_ON(data_offset != krb5->conf_len)) + return -EINVAL; /* Data is in wrong place */ + + secure_offset = 0; + base_len = krb5->conf_len + data_len; + pad_len = 0; + secure_len = base_len + pad_len; + cksum_offset = secure_len; + if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len)) + return -EFAULT; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Insert the confounder into the buffer */ + ret = -EFAULT; + if (!preconfounded) { + get_random_bytes(buffer, krb5->conf_len); + done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len, + secure_offset); + if (done != krb5->conf_len) + goto error; + } + + /* We may need to pad out to the crypto blocksize. */ + if (pad_len) { + done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len); + if (done != pad_len) + goto error; + } + + /* Hash and encrypt the message. */ + req = buffer; + iv = buffer + krb5_aead_size(aead); + + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_crypt(req, sg, sg, secure_len, iv); + ret = crypto_aead_encrypt(req); + if (ret < 0) + goto error; + + ret = secure_len + krb5->cksum_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Apply decryption and checksumming functions to a message. The offset and + * length are updated to reflect the actual content of the encrypted region. + */ +int krb5_aead_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct aead_request *req; + size_t bsize; + void *buffer; + int ret; + u8 *iv; + + if (WARN_ON(*_offset != 0)) + return -EINVAL; /* Can't set offset on aead */ + + if (*_len < krb5->conf_len + krb5->cksum_len) + return -EPROTO; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Decrypt the message and verify its checksum. 
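+	 *
+	 * The layout produced by krb5_aead_encrypt() is
+	 *
+	 *	[ confounder | plaintext | checksum ]
+	 *
+	 * so on success the offset is advanced past the confounder and the
+	 * checksum is trimmed off the length (see the adjustment below).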
*/ + req = buffer; + iv = buffer + krb5_aead_size(aead); + + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_crypt(req, sg, sg, *_len, iv); + ret = crypto_aead_decrypt(req); + if (ret < 0) + goto error; + + /* Adjust the boundaries of the data. */ + *_offset += krb5->conf_len; + *_len -= krb5->conf_len + krb5->cksum_len; + ret = 0; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Generate a checksum over some metadata and part of an skbuff and insert the + * MIC into the skbuff immediately prior to the data. + */ +ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len) +{ + struct shash_desc *desc; + ssize_t ret, done; + size_t bsize; + void *buffer, *digest; + + if (WARN_ON(data_offset != krb5->cksum_len)) + return -EMSGSIZE; + + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Calculate the MIC with key Kc and store it into the skb */ + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + if (metadata) { + ret = crypto_shash_update(desc, metadata->data, metadata->len); + if (ret < 0) + goto error; + } + + ret = crypto_shash_update_sg(desc, sg, data_offset, data_len); + if (ret < 0) + goto error; + + digest = buffer + krb5_shash_size(shash); + ret = crypto_shash_final(desc, digest); + if (ret < 0) + goto error; + + ret = -EFAULT; + done = sg_pcopy_from_buffer(sg, nr_sg, digest, krb5->cksum_len, + data_offset - krb5->cksum_len); + if (done != krb5->cksum_len) + goto error; + + ret = krb5->cksum_len + data_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Check the MIC on a region of an skbuff. The offset and length are updated + * to reflect the actual content of the secure region. 
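+ *
+ * The secure region as laid out by rfc3961_get_mic() is
+ *
+ *	[ MIC | data ]
+ *
+ * so verification recomputes the keyed hash over the metadata and data and
+ * compares it with the stored MIC; -EBADMSG is returned on a mismatch.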
+ */
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+		       struct crypto_shash *shash,
+		       const struct krb5_buffer *metadata,
+		       struct scatterlist *sg, unsigned int nr_sg,
+		       size_t *_offset, size_t *_len)
+{
+	struct shash_desc *desc;
+	ssize_t done;
+	size_t bsize, data_offset, data_len, offset = *_offset, len = *_len;
+	void *buffer = NULL;
+	int ret;
+	u8 *cksum, *cksum2;
+
+	if (len < krb5->cksum_len)
+		return -EPROTO;
+	data_offset = offset + krb5->cksum_len;
+	data_len = len - krb5->cksum_len;
+
+	bsize = krb5_shash_size(shash) +
+		krb5_digest_size(shash) * 2;
+	buffer = kzalloc(bsize, GFP_NOFS);
+	if (!buffer)
+		return -ENOMEM;
+
+	cksum = buffer +
+		krb5_shash_size(shash);
+	cksum2 = buffer +
+		krb5_shash_size(shash) +
+		krb5_digest_size(shash);
+
+	/* Calculate the MIC */
+	desc = buffer;
+	desc->tfm = shash;
+	ret = crypto_shash_init(desc);
+	if (ret < 0)
+		goto error;
+
+	if (metadata) {
+		ret = crypto_shash_update(desc, metadata->data, metadata->len);
+		if (ret < 0)
+			goto error;
+	}
+
+	ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+	if (ret < 0)
+		goto error;
+	ret = crypto_shash_final(desc, cksum);
+	if (ret < 0)
+		goto error;
+
+	ret = -EFAULT;
+	done = sg_pcopy_to_buffer(sg, nr_sg, cksum2, krb5->cksum_len, offset);
+	if (done != krb5->cksum_len)
+		goto error;
+
+	if (memcmp(cksum, cksum2, krb5->cksum_len) != 0) {
+		ret = -EBADMSG;
+		goto error;
+	}
+
+	*_offset += krb5->cksum_len;
+	*_len -= krb5->cksum_len;
+	ret = 0;
+
+error:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+const struct krb5_crypto_profile rfc3961_simplified_profile = {
+	.calc_PRF = rfc3961_calc_PRF,
+	.calc_Kc = rfc3961_calc_DK,
+	.calc_Ke = rfc3961_calc_DK,
+	.calc_Ki = rfc3961_calc_DK,
+	.derive_encrypt_keys = authenc_derive_encrypt_keys,
+	.load_encrypt_keys = authenc_load_encrypt_keys,
+	.derive_checksum_key = rfc3961_derive_checksum_key,
+	.load_checksum_key = rfc3961_load_checksum_key,
+	.encrypt = krb5_aead_encrypt,
+	.decrypt = krb5_aead_decrypt,
+	.get_mic = rfc3961_get_mic,
+	.verify_mic = rfc3961_verify_mic,
+};
diff --git a/crypto/krb5/rfc3962_aes.c b/crypto/krb5/rfc3962_aes.c
new file mode 100644
index 000000000000..5cbf8f4638b9
--- /dev/null
+++ b/crypto/krb5/rfc3962_aes.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3962 Advanced Encryption Standard (AES) Encryption for Kerberos 5
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization.  If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "internal.h" + +const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96 = { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128, + .name = "aes128-cts-hmac-sha1-96", + .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))", + .cksum_name = "hmac(sha1)", + .hash_name = "sha1", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 12, + .hash_len = 20, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc3961_simplified_profile, +}; + +const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96 = { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256, + .name = "aes256-cts-hmac-sha1-96", + .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))", + .cksum_name = "hmac(sha1)", + .hash_name = "sha1", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 32, + .Ke_len = 32, + .Ki_len = 32, + .block_len = 16, + .conf_len = 16, + .cksum_len = 12, + .hash_len = 20, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc3961_simplified_profile, +}; diff --git a/crypto/krb5/rfc6803_camellia.c b/crypto/krb5/rfc6803_camellia.c new file mode 100644 index 000000000000..77cd4ce023f1 --- /dev/null +++ b/crypto/krb5/rfc6803_camellia.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* rfc6803 Camellia Encryption for Kerberos 5 + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include "internal.h" + +/* + * Calculate the key derivation function KDF-FEEDBACK_CMAC(key, constant) + * + * n = ceiling(k / 128) + * K(0) = zeros + * K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k) + * DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n)) + * KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant)) + * + * [rfc6803 sec 3] + */ +static int rfc6803_calc_KDF_FEEDBACK_CMAC(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_shash *shash; + struct krb5_buffer K, data; + struct shash_desc *desc; + __be32 tmp; + size_t bsize, offset, seg; + void *buffer; + u32 i = 0, k = result->len * 8; + u8 *p; + int ret = -ENOMEM; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash); + ret = crypto_shash_setkey(shash, key->data, key->len); + if (ret < 0) + goto error_shash; + + ret = -ENOMEM; + K.len = crypto_shash_digestsize(shash); + data.len = K.len + 4 + constant->len + 1 + 4; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(K.len) + + crypto_roundup(data.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto error_shash; + + desc = buffer; + desc->tfm = shash; + + K.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + data.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(K.len); + + p = data.data + K.len + 4; + memcpy(p, constant->data, constant->len); + p += constant->len; + *p++ = 0x00; + tmp = htonl(k); + memcpy(p, &tmp, 4); + p += 4; + + ret = -EINVAL; + if (WARN_ON(p - (u8 *)data.data != data.len)) + goto error; + + offset = 0; + do { + i++; + p = data.data; + memcpy(p, K.data, K.len); + p += K.len; + *(__be32 *)p = htonl(i); + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + ret = crypto_shash_finup(desc, data.data, data.len, K.data); + if (ret < 0) + goto error; + + seg = min_t(size_t, result->len - offset, K.len); + memcpy(result->data + offset, K.data, seg); + offset += seg; + } while (offset < result->len); + +error: + kfree_sensitive(buffer); +error_shash: + crypto_free_shash(shash); + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf") + * PRF = CMAC(Kp, octet-string) + * [rfc6803 sec 6] + */ +static int rfc6803_calc_PRF(const struct krb5_enctype *krb5, + const struct krb5_buffer *protocol_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp) +{ + static const struct krb5_buffer prfconstant = { 3, "prf" }; + struct crypto_shash *shash; + struct krb5_buffer Kp; + struct shash_desc *desc; + size_t bsize; + void *buffer; + int ret; + + Kp.len = krb5->prf_len; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? 
-ENOPKG : PTR_ERR(shash); + + ret = -EINVAL; + if (result->len != crypto_shash_digestsize(shash)) + goto out_shash; + + ret = -ENOMEM; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(Kp.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto out_shash; + + Kp.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + + ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, protocol_key, &prfconstant, + &Kp, gfp); + if (ret < 0) + goto out; + + ret = crypto_shash_setkey(shash, Kp.data, Kp.len); + if (ret < 0) + goto out; + + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto out; + + ret = crypto_shash_finup(desc, octet_string->data, octet_string->len, result->data); + if (ret < 0) + goto out; + +out: + kfree_sensitive(buffer); +out_shash: + crypto_free_shash(shash); + return ret; +} + + +static const struct krb5_crypto_profile rfc6803_crypto_profile = { + .calc_PRF = rfc6803_calc_PRF, + .calc_Kc = rfc6803_calc_KDF_FEEDBACK_CMAC, + .calc_Ke = rfc6803_calc_KDF_FEEDBACK_CMAC, + .calc_Ki = rfc6803_calc_KDF_FEEDBACK_CMAC, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = krb5_aead_encrypt, + .decrypt = krb5_aead_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; + +const struct krb5_enctype krb5_camellia128_cts_cmac = { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA128, + .name = "camellia128-cts-cmac", + .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .cksum_name = "cmac(camellia)", + .hash_name = NULL, + .derivation_enc = "cts(cbc(camellia))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 16, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc6803_crypto_profile, +}; + +const struct krb5_enctype krb5_camellia256_cts_cmac = { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA256, + .name = "camellia256-cts-cmac", + .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .cksum_name = "cmac(camellia)", + .hash_name = NULL, + .derivation_enc = "cts(cbc(camellia))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 32, + .Ke_len = 32, + .Ki_len = 32, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 16, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc6803_crypto_profile, +}; diff --git a/crypto/krb5/rfc8009_aes2.c b/crypto/krb5/rfc8009_aes2.c new file mode 100644 index 000000000000..d39851fc3a4e --- /dev/null +++ b/crypto/krb5/rfc8009_aes2.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* rfc8009 AES Encryption with HMAC-SHA2 for Kerberos 5 + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <crypto/authenc.h> +#include "internal.h" + +static const struct krb5_buffer rfc8009_no_context = { .len = 0, .data = "" }; + +/* + * Calculate the key derivation function KDF-HMAC-SHA2(key, label, [context,] k) + * + * KDF-HMAC-SHA2(key, label, [context,] k) = k-truncate(K1) + * + * Using the appropriate one of: + * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | k) + * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | k) + * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | context | k) + * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | context | k) + * [rfc8009 sec 3] + */ +static int rfc8009_calc_KDF_HMAC_SHA2(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *label, + const struct krb5_buffer *context, + unsigned int k, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_shash *shash; + struct krb5_buffer K1, data; + struct shash_desc *desc; + __be32 tmp; + size_t bsize; + void *buffer; + u8 *p; + int ret = -ENOMEM; + + if (WARN_ON(result->len != k / 8)) + return -EINVAL; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash); + ret = crypto_shash_setkey(shash, key->data, key->len); + if (ret < 0) + goto error_shash; + + ret = -EINVAL; + if (WARN_ON(crypto_shash_digestsize(shash) * 8 < k)) + goto error_shash; + + ret = -ENOMEM; + data.len = 4 + label->len + 1 + context->len + 4; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(data.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto error_shash; + + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + p = data.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + *(__be32 *)p = htonl(0x00000001); + p += 4; + memcpy(p, label->data, label->len); + p += label->len; + *p++ = 0; + memcpy(p, context->data, context->len); + p += context->len; + tmp = htonl(k); + memcpy(p, &tmp, 4); + p += 4; + + ret = -EINVAL; + if (WARN_ON(p - (u8 *)data.data != data.len)) + goto error; + + K1.len = crypto_shash_digestsize(shash); + K1.data = buffer + + krb5_shash_size(shash); + + ret = crypto_shash_finup(desc, data.data, data.len, K1.data); + if (ret < 0) + goto error; + + memcpy(result->data, K1.data, result->len); + +error: + kfree_sensitive(buffer); +error_shash: + crypto_free_shash(shash); + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 256) + * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 384) + * + * The "prfconstant" used in the PRF operation is the three-octet string + * "prf". + * [rfc8009 sec 5] + */ +static int rfc8009_calc_PRF(const struct krb5_enctype *krb5, + const struct krb5_buffer *input_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp) +{ + static const struct krb5_buffer prfconstant = { 3, "prf" }; + + return rfc8009_calc_KDF_HMAC_SHA2(krb5, input_key, &prfconstant, + octet_string, krb5->prf_len * 8, + result, gfp); +} + +/* + * Derive Ke. 
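+ * The label here is the five-octet usage constant built by krb5_derive_Ke()
+ * (big-endian usage number plus the 0xAA seed octet) and the context is
+ * empty, using whichever of these matches the enctype's key size: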
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128) + * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256) + * [rfc8009 sec 5] + */ +static int rfc8009_calc_Ke(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant, + &rfc8009_no_context, krb5->key_bytes * 8, + result, gfp); +} + +/* + * Derive Kc/Ki + * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128) + * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128) + * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192) + * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192) + * [rfc8009 sec 5] + */ +static int rfc8009_calc_Ki(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant, + &rfc8009_no_context, krb5->cksum_len * 8, + result, gfp); +} + +/* + * Apply encryption and checksumming functions to a message. Unlike for + * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first. + */ +static ssize_t rfc8009_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + struct aead_request *req; + struct scatterlist bsg[2]; + ssize_t ret, done; + size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset; + void *buffer; + u8 *iv, *ad; + + if (WARN_ON(data_offset != krb5->conf_len)) + return -EINVAL; /* Data is in wrong place */ + + secure_offset = 0; + base_len = krb5->conf_len + data_len; + pad_len = 0; + secure_len = base_len + pad_len; + cksum_offset = secure_len; + if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len)) + return -EFAULT; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + req = buffer; + iv = buffer + krb5_aead_size(aead); + ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead); + + /* Insert the confounder into the buffer */ + ret = -EFAULT; + if (!preconfounded) { + get_random_bytes(buffer, krb5->conf_len); + done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len, + secure_offset); + if (done != krb5->conf_len) + goto error; + } + + /* We may need to pad out to the crypto blocksize. */ + if (pad_len) { + done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len); + if (done != pad_len) + goto error; + } + + /* We need to include the starting IV in the hash. */ + sg_init_table(bsg, 2); + sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead)); + sg_chain(bsg, 2, sg); + + /* Hash and encrypt the message. */ + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, krb5_aead_ivsize(aead)); + aead_request_set_crypt(req, bsg, bsg, secure_len, iv); + ret = crypto_aead_encrypt(req); + if (ret < 0) + goto error; + + ret = secure_len + krb5->cksum_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Apply decryption and checksumming functions to a message. Unlike for + * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first. + * + * The offset and length are updated to reflect the actual content of the + * encrypted region. 
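+ *
+ * Concretely, the all-zero starting IV is presented to the AEAD as
+ * associated data by chaining a one-element scatterlist ahead of the
+ * caller's list, mirroring rfc8009_encrypt() above.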
+ */ +static int rfc8009_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct aead_request *req; + struct scatterlist bsg[2]; + size_t bsize; + void *buffer; + int ret; + u8 *iv, *ad; + + if (WARN_ON(*_offset != 0)) + return -EINVAL; /* Can't set offset on aead */ + + if (*_len < krb5->conf_len + krb5->cksum_len) + return -EPROTO; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + req = buffer; + iv = buffer + krb5_aead_size(aead); + ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead); + + /* We need to include the starting IV in the hash. */ + sg_init_table(bsg, 2); + sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead)); + sg_chain(bsg, 2, sg); + + /* Decrypt the message and verify its checksum. */ + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, krb5_aead_ivsize(aead)); + aead_request_set_crypt(req, bsg, bsg, *_len, iv); + ret = crypto_aead_decrypt(req); + if (ret < 0) + goto error; + + /* Adjust the boundaries of the data. */ + *_offset += krb5->conf_len; + *_len -= krb5->conf_len + krb5->cksum_len; + ret = 0; + +error: + kfree_sensitive(buffer); + return ret; +} + +static const struct krb5_crypto_profile rfc8009_crypto_profile = { + .calc_PRF = rfc8009_calc_PRF, + .calc_Kc = rfc8009_calc_Ki, + .calc_Ke = rfc8009_calc_Ke, + .calc_Ki = rfc8009_calc_Ki, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = rfc8009_encrypt, + .decrypt = rfc8009_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; + +const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128 = { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128, + .name = "aes128-cts-hmac-sha256-128", + .encrypt_name = "authenc(hmac(sha256),cts(cbc(aes)))", + .cksum_name = "hmac(sha256)", + .hash_name = "sha256", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 20, + .prf_len = 32, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc8009_crypto_profile, +}; + +const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192 = { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256, + .name = "aes256-cts-hmac-sha384-192", + .encrypt_name = "authenc(hmac(sha384),cts(cbc(aes)))", + .cksum_name = "hmac(sha384)", + .hash_name = "sha384", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 24, + .Ke_len = 32, + .Ki_len = 24, + .block_len = 16, + .conf_len = 16, + .cksum_len = 24, + .hash_len = 20, + .prf_len = 48, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc8009_crypto_profile, +}; diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c new file mode 100644 index 000000000000..2a81a6315a0d --- /dev/null +++ b/crypto/krb5/selftest.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos library self-testing + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +#define VALID(X) \ + ({ \ + bool __x = (X); \ + if (__x) { \ + pr_warn("!!! TESTINVAL %s:%u\n", __FILE__, __LINE__); \ + ret = -EBADMSG; \ + } \ + __x; \ + }) + +#define CHECK(X) \ + ({ \ + bool __x = (X); \ + if (__x) { \ + pr_warn("!!! TESTFAIL %s:%u\n", __FILE__, __LINE__); \ + ret = -EBADMSG; \ + } \ + __x; \ + }) + +enum which_key { + TEST_KC, TEST_KE, TEST_KI, +}; + +#if 0 +static void dump_sg(struct scatterlist *sg, unsigned int limit) +{ + unsigned int index = 0, n = 0; + + for (; sg && limit > 0; sg = sg_next(sg)) { + unsigned int off = sg->offset, len = umin(sg->length, limit); + const void *p = kmap_local_page(sg_page(sg)); + + limit -= len; + while (len > 0) { + unsigned int part = umin(len, 32); + + pr_notice("[%x] %04x: %*phN\n", n, index, part, p + off); + index += part; + off += part; + len -= part; + } + + kunmap_local(p); + n++; + } +} +#endif + +static int prep_buf(struct krb5_buffer *buf) +{ + buf->data = kmalloc(buf->len, GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + return 0; +} + +#define PREP_BUF(BUF, LEN) \ + do { \ + (BUF)->len = (LEN); \ + ret = prep_buf((BUF)); \ + if (ret < 0) \ + goto out; \ + } while (0) + +static int load_buf(struct krb5_buffer *buf, const char *from) +{ + size_t len = strlen(from); + int ret; + + if (len > 1 && from[0] == '\'') { + PREP_BUF(buf, len - 1); + memcpy(buf->data, from + 1, len - 1); + ret = 0; + goto out; + } + + if (VALID(len & 1)) + return -EINVAL; + + PREP_BUF(buf, len / 2); + ret = hex2bin(buf->data, from, buf->len); + if (ret < 0) { + VALID(1); + goto out; + } +out: + return ret; +} + +#define LOAD_BUF(BUF, FROM) do { ret = load_buf(BUF, FROM); if (ret < 0) goto out; } while (0) + +static void clear_buf(struct krb5_buffer *buf) +{ + kfree(buf->data); + buf->len = 0; + buf->data = NULL; +} + +/* + * Perform a pseudo-random function check. + */ +static int krb5_test_one_prf(const struct krb5_prf_test *test) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct krb5_buffer key = {}, octet = {}, result = {}, prf = {}; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + LOAD_BUF(&key, test->key); + LOAD_BUF(&octet, test->octet); + LOAD_BUF(&prf, test->prf); + PREP_BUF(&result, krb5->prf_len); + + if (VALID(result.len != prf.len)) { + ret = -EINVAL; + goto out; + } + + ret = krb5->profile->calc_PRF(krb5, &key, &octet, &result, GFP_KERNEL); + if (ret < 0) { + CHECK(1); + pr_warn("PRF calculation failed %d\n", ret); + goto out; + } + + if (memcmp(result.data, prf.data, result.len) != 0) { + CHECK(1); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&result); + clear_buf(&octet); + clear_buf(&key); + return ret; +} + +/* + * Perform a key derivation check. 
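+ *
+ * Each test vector supplies a base key and the expected Kc, Ke and Ki for a
+ * given usage number; the key is derived with the matching helper and
+ * memcmp()d against the expectation.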
+ */ +static int krb5_test_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_key_test_one *test, + enum which_key which) +{ + struct krb5_buffer key = {}, result = {}; + int ret; + + LOAD_BUF(&key, test->key); + PREP_BUF(&result, key.len); + + switch (which) { + case TEST_KC: + ret = krb5_derive_Kc(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + case TEST_KE: + ret = krb5_derive_Ke(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + case TEST_KI: + ret = krb5_derive_Ki(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + default: + VALID(1); + ret = -EINVAL; + goto out; + } + + if (ret < 0) { + CHECK(1); + pr_warn("Key derivation failed %d\n", ret); + goto out; + } + + if (memcmp(result.data, key.data, result.len) != 0) { + CHECK(1); + ret = -EKEYREJECTED; + goto out; + } + +out: + clear_buf(&key); + clear_buf(&result); + return ret; +} + +static int krb5_test_one_key(const struct krb5_key_test *test) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct krb5_buffer base_key = {}; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + LOAD_BUF(&base_key, test->key); + + ret = krb5_test_key(krb5, &base_key, &test->Kc, TEST_KC); + if (ret < 0) + goto out; + ret = krb5_test_key(krb5, &base_key, &test->Ke, TEST_KE); + if (ret < 0) + goto out; + ret = krb5_test_key(krb5, &base_key, &test->Ki, TEST_KI); + if (ret < 0) + goto out; + +out: + clear_buf(&base_key); + return ret; +} + +/* + * Perform an encryption test. + */ +static int krb5_test_one_enc(const struct krb5_enc_test *test, void *buf) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct crypto_aead *ci = NULL; + struct krb5_buffer K0 = {}, Ke = {}, Ki = {}, keys = {}; + struct krb5_buffer conf = {}, plain = {}, ct = {}; + struct scatterlist sg[1]; + size_t data_len, data_offset, message_len; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + /* Load the test data into binary buffers. */ + LOAD_BUF(&conf, test->conf); + LOAD_BUF(&plain, test->plain); + LOAD_BUF(&ct, test->ct); + + if (test->K0) { + LOAD_BUF(&K0, test->K0); + } else { + LOAD_BUF(&Ke, test->Ke); + LOAD_BUF(&Ki, test->Ki); + + ret = krb5->profile->load_encrypt_keys(krb5, &Ke, &Ki, &keys, GFP_KERNEL); + if (ret < 0) + goto out; + } + + if (VALID(conf.len != krb5->conf_len) || + VALID(ct.len != krb5->conf_len + plain.len + krb5->cksum_len)) + goto out; + + data_len = plain.len; + message_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE, + data_len, &data_offset); + + if (CHECK(message_len != ct.len)) { + pr_warn("Encrypted length mismatch %zu != %u\n", message_len, ct.len); + goto out; + } + if (CHECK(data_offset != conf.len)) { + pr_warn("Data offset mismatch %zu != %u\n", data_offset, conf.len); + goto out; + } + + memcpy(buf, conf.data, conf.len); + memcpy(buf + data_offset, plain.data, plain.len); + + /* Allocate a crypto object and set its key. */ + if (test->K0) + ci = crypto_krb5_prepare_encryption(krb5, &K0, test->usage, GFP_KERNEL); + else + ci = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL); + + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + ci = NULL; + pr_err("Couldn't alloc AEAD %s: %d\n", krb5->encrypt_name, ret); + goto out; + } + + /* Encrypt the message. 
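+	 * The buffer already holds the vector's fixed confounder followed
+	 * by the plaintext, so preconfounded == true is passed to stop a
+	 * random confounder being generated (which would make the
+	 * ciphertext uncomparable with the expected value).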
*/ + sg_init_one(sg, buf, message_len); + ret = crypto_krb5_encrypt(krb5, ci, sg, 1, message_len, + data_offset, data_len, true); + if (ret < 0) { + CHECK(1); + pr_warn("Encryption failed %d\n", ret); + goto out; + } + if (ret != message_len) { + CHECK(1); + pr_warn("Encrypted message wrong size %x != %zx\n", ret, message_len); + goto out; + } + + if (memcmp(buf, ct.data, ct.len) != 0) { + CHECK(1); + pr_warn("Ciphertext mismatch\n"); + pr_warn("BUF %*phN\n", ct.len, buf); + pr_warn("CT %*phN\n", ct.len, ct.data); + pr_warn("PT %*phN%*phN\n", conf.len, conf.data, plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + /* Decrypt the encrypted message. */ + data_offset = 0; + data_len = message_len; + ret = crypto_krb5_decrypt(krb5, ci, sg, 1, &data_offset, &data_len); + if (ret < 0) { + CHECK(1); + pr_warn("Decryption failed %d\n", ret); + goto out; + } + + if (CHECK(data_offset != conf.len) || + CHECK(data_len != plain.len)) + goto out; + + if (memcmp(buf, conf.data, conf.len) != 0) { + CHECK(1); + pr_warn("Confounder mismatch\n"); + pr_warn("ENC %*phN\n", conf.len, buf); + pr_warn("DEC %*phN\n", conf.len, conf.data); + ret = -EKEYREJECTED; + goto out; + } + + if (memcmp(buf + conf.len, plain.data, plain.len) != 0) { + CHECK(1); + pr_warn("Plaintext mismatch\n"); + pr_warn("BUF %*phN\n", plain.len, buf + conf.len); + pr_warn("PT %*phN\n", plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&ct); + clear_buf(&plain); + clear_buf(&conf); + clear_buf(&keys); + clear_buf(&Ki); + clear_buf(&Ke); + clear_buf(&K0); + if (ci) + crypto_free_aead(ci); + return ret; +} + +/* + * Perform a checksum test. + */ +static int krb5_test_one_mic(const struct krb5_mic_test *test, void *buf) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct crypto_shash *ci = NULL; + struct scatterlist sg[1]; + struct krb5_buffer K0 = {}, Kc = {}, keys = {}, plain = {}, mic = {}; + size_t offset, len, message_len; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + /* Allocate a crypto object and set its key. */ + if (test->K0) { + LOAD_BUF(&K0, test->K0); + ci = crypto_krb5_prepare_checksum(krb5, &K0, test->usage, GFP_KERNEL); + } else { + LOAD_BUF(&Kc, test->Kc); + + ret = krb5->profile->load_checksum_key(krb5, &Kc, &keys, GFP_KERNEL); + if (ret < 0) + goto out; + + ci = krb5_prepare_checksum(krb5, &Kc, GFP_KERNEL); + } + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + ci = NULL; + pr_err("Couldn't alloc shash %s: %d\n", krb5->cksum_name, ret); + goto out; + } + + /* Load the test data into binary buffers. */ + LOAD_BUF(&plain, test->plain); + LOAD_BUF(&mic, test->mic); + + len = plain.len; + message_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE, + len, &offset); + + if (CHECK(message_len != mic.len + plain.len)) { + pr_warn("MIC length mismatch %zu != %u\n", + message_len, mic.len + plain.len); + goto out; + } + + memcpy(buf + offset, plain.data, plain.len); + + /* Generate a MIC generation request. 
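+	 * The plaintext was copied in at offset krb5->cksum_len, leaving a
+	 * gap at the front of the buffer that get_mic() fills in:
+	 *
+	 *	[ MIC | plaintext ]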
*/ + sg_init_one(sg, buf, 1024); + + ret = crypto_krb5_get_mic(krb5, ci, NULL, sg, 1, 1024, + krb5->cksum_len, plain.len); + if (ret < 0) { + CHECK(1); + pr_warn("Get MIC failed %d\n", ret); + goto out; + } + len = ret; + + if (CHECK(len != plain.len + mic.len)) { + pr_warn("MIC length mismatch %zu != %u\n", len, plain.len + mic.len); + goto out; + } + + if (memcmp(buf, mic.data, mic.len) != 0) { + CHECK(1); + pr_warn("MIC mismatch\n"); + pr_warn("BUF %*phN\n", mic.len, buf); + pr_warn("MIC %*phN\n", mic.len, mic.data); + ret = -EKEYREJECTED; + goto out; + } + + /* Generate a verification request. */ + offset = 0; + ret = crypto_krb5_verify_mic(krb5, ci, NULL, sg, 1, &offset, &len); + if (ret < 0) { + CHECK(1); + pr_warn("Verify MIC failed %d\n", ret); + goto out; + } + + if (CHECK(offset != mic.len) || + CHECK(len != plain.len)) + goto out; + + if (memcmp(buf + offset, plain.data, plain.len) != 0) { + CHECK(1); + pr_warn("Plaintext mismatch\n"); + pr_warn("BUF %*phN\n", plain.len, buf + offset); + pr_warn("PT %*phN\n", plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&mic); + clear_buf(&plain); + clear_buf(&keys); + clear_buf(&K0); + clear_buf(&Kc); + if (ci) + crypto_free_shash(ci); + return ret; +} + +int krb5_selftest(void) +{ + void *buf; + int ret = 0, i; + + buf = kmalloc(4096, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pr_notice("\n"); + pr_notice("Running selftests\n"); + + for (i = 0; krb5_prf_tests[i].name; i++) { + ret = krb5_test_one_prf(&krb5_prf_tests[i]); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_prf_tests[i].name); + } + } + + for (i = 0; krb5_key_tests[i].name; i++) { + ret = krb5_test_one_key(&krb5_key_tests[i]); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_key_tests[i].name); + } + } + + for (i = 0; krb5_enc_tests[i].name; i++) { + memset(buf, 0x5a, 4096); + ret = krb5_test_one_enc(&krb5_enc_tests[i], buf); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_enc_tests[i].name); + } + } + + for (i = 0; krb5_mic_tests[i].name; i++) { + memset(buf, 0x5a, 4096); + ret = krb5_test_one_mic(&krb5_mic_tests[i], buf); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_mic_tests[i].name); + } + } + + ret = 0; +out: + pr_notice("Selftests %s\n", ret == 0 ? "succeeded" : "failed"); + kfree(buf); + return ret; +} diff --git a/crypto/krb5/selftest_data.c b/crypto/krb5/selftest_data.c new file mode 100644 index 000000000000..24447ee8bf07 --- /dev/null +++ b/crypto/krb5/selftest_data.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Data for Kerberos library self-testing + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "internal.h" + +/* + * Pseudo-random function tests. 
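+ *
+ * Key/octet/prf strings are hex; load_buf() in selftest.c decodes them with
+ * hex2bin() (a leading ' would mark a literal byte string instead).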
+ */ +const struct krb5_prf_test krb5_prf_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "prf", + .key = "3705D96080C17728A0E800EAB6E0D23C", + .octet = "74657374", + .prf = "9D188616F63852FE86915BB840B4A886FF3E6BB0F819B49B893393D393854295", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "prf", + .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52", + .octet = "74657374", + .prf = + "9801F69A368C2BF675E59521E177D9A07F67EFE1CFDE8D3C8D6F6A0256E3B17D" + "B3C1B62AD1B8553360D17367EB1514D2", + }, + {/* END */} +}; + +/* + * Key derivation tests. + */ +const struct krb5_key_test krb5_key_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "key", + .key = "3705D96080C17728A0E800EAB6E0D23C", + .Kc.use = 0x00000002, + .Kc.key = "B31A018A48F54776F403E9A396325DC3", + .Ke.use = 0x00000002, + .Ke.key = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki.use = 0x00000002, + .Ki.key = "9FDA0E56AB2D85E1569A688696C26A6C", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "key", + .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52", + .Kc.use = 0x00000002, + .Kc.key = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D", + .Ke.use = 0x00000002, + .Ke.key = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki.use = 0x00000002, + .Ki.key = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "key", + .key = "57D0297298FFD9D35DE5A47FB4BDE24B", + .Kc.use = 0x00000002, + .Kc.key = "D155775A209D05F02B38D42A389E5A56", + .Ke.use = 0x00000002, + .Ke.key = "64DF83F85A532F17577D8C37035796AB", + .Ki.use = 0x00000002, + .Ki.key = "3E4FBDF30FB8259C425CB6C96F1F4635", + }, + { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "key", + .key = "B9D6828B2056B7BE656D88A123B1FAC68214AC2B727ECF5F69AFE0C4DF2A6D2C", + .Kc.use = 0x00000002, + .Kc.key = "E467F9A9552BC7D3155A6220AF9C19220EEED4FF78B0D1E6A1544991461A9E50", + .Ke.use = 0x00000002, + .Ke.key = "412AEFC362A7285FC3966C6A5181E7605AE675235B6D549FBFC9AB6630A4C604", + .Ki.use = 0x00000002, + .Ki.key = "FA624FA0E523993FA388AEFDC67E67EBCD8C08E8A0246B1D73B0D1DD9FC582B0", + }, + {/* END */} +}; + +/* + * Encryption tests. 
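+ *
+ * Each vector fixes the confounder so encryption is deterministic; the
+ * expected ct is E(confounder | plaintext) plus the truncated checksum,
+ * i.e. ct.len == conf_len + plain.len + cksum_len, as checked by the
+ * VALID() test in krb5_test_one_enc().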
+ */ +const struct krb5_enc_test krb5_enc_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc no plain", + .plain = "", + .conf = "7E5895EAF2672435BAD817F545A37148", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "EF85FB890BB8472F4DAB20394DCA781DAD877EDA39D50C870C0D5A0A8E48C718", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain<block", + .plain = "000102030405", + .conf = "7BCA285E2FD4130FB55B1A5C83BC5B24", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7E6877CE99E247E52D16ED4421DFDF8976C", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain==block", + .plain = "000102030405060708090A0B0C0D0E0F", + .conf = "56AB21713FF62C0A1457200F6FA9948F", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "3517D640F50DDC8AD3628722B3569D2AE07493FA8263254080EA65C1008E8FC295FB4852E7D83E1E7C48C37EEBE6B0D3", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain>block", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .conf = "A7A4E29A4728CE10664FB64E49AD3FAC", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "720F73B18D9859CD6CCB4346115CD336C70F58EDC0C4437C5573544C31C813BCE1E6D072C186B39A413C2F92CA9B8334A287FFCBFC", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc no plain", + .plain = "", + .conf = "F764E9FA15C276478B2C7D0C4E5F58E4", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "41F53FA5BFE7026D91FAF9BE959195A058707273A96A40F0A01960621AC612748B9BBFBE7EB4CE3C", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain<block", + .plain = "000102030405", + .conf = "B80D3251C1F6471494256FFE712D0B9A", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "4ED7B37C2BCAC8F74F23C1CF07E62BC7B75FB3F637B9F559C7F664F69EAB7B6092237526EA0D1F61CB20D69D10F2", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain==block", + .plain = "000102030405060708090A0B0C0D0E0F", + .conf = "53BF8A0D105265D4E276428624CE5E63", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "BC47FFEC7998EB91E8115CF8D19DAC4BBBE2E163E87DD37F49BECA92027764F68CF51F14D798C2273F35DF574D1F932E40C4FF255B36A266", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain>block", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .conf = "763E65367E864F02F55153C7E3B58AF1", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "40013E2DF58E8751957D2878BCD2D6FE101CCFD556CB1EAE79DB3C3EE86429F2B2A602AC86FEF6ECB647D6295FAE077A1FEB517508D2C16B4192E01F62", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc no plain", + .plain = "", + .conf = "B69822A19A6B09C0EBC8557D1F1B6C0A", + .K0 = "1DC46A8D763F4F93742BCBA3387576C3", + .usage = 0, + .ct = "C466F1871069921EDB7C6FDE244A52DB0BA10EDC197BDB8006658CA3CCCE6EB8", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 1 plain", 
+ .plain = "'1", + .conf = "6F2FC3C2A166FD8898967A83DE9596D9", + .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C", + .usage = 1, + .ct = "842D21FD950311C0DD464A3F4BE8D6DA88A56D559C9B47D3F9A85067AF661559B8", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 9 plain", + .plain = "'9 bytesss", + .conf = "A5B4A71E077AEEF93C8763C18FDB1F10", + .K0 = "A1BB61E805F9BA6DDE8FDBDDC05CDEA0", + .usage = 2, + .ct = "619FF072E36286FF0A28DEB3A352EC0D0EDF5C5160D663C901758CCF9D1ED33D71DB8F23AABF8348A0", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 13 plain", + .plain = "'13 bytes byte", + .conf = "19FEE40D810C524B5B22F01874C693DA", + .K0 = "2CA27A5FAF5532244506434E1CEF6676", + .usage = 3, + .ct = "B8ECA3167AE6315512E59F98A7C500205E5F63FF3BB389AF1C41A21D640D8615C9ED3FBEB05AB6ACB67689B5EA", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 30 plain", + .plain = "'30 bytes bytes bytes bytes byt", + .conf = "CA7A7AB4BE192DABD603506DB19C39E2", + .K0 = "7824F8C16F83FF354C6BF7515B973F43", + .usage = 4, + .ct = "A26A3905A4FFD5816B7B1E27380D08090C8EC1F304496E1ABDCD2BDCD1DFFC660989E117A713DDBB57A4146C1587CBA4356665591D2240282F5842B105A5", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc no plain", + .plain = "", + .conf = "3CBBD2B45917941067F96599BB98926C", + .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B", + .usage = 0, + .ct = "03886D03310B47A6D8F06D7B94D1DD837ECCE315EF652AFF620859D94A259266", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 1 plain", + .plain = "'1", + .conf = "DEF487FCEBE6DE6346D4DA4521BBA2D2", + .K0 = "1B97FE0A190E2021EB30753E1B6E1E77B0754B1D684610355864104963463833", + .usage = 1, + .ct = "2C9C1570133C99BF6A34BC1B0212002FD194338749DB4135497A347CFCD9D18A12", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 9 plain", + .plain = "'9 bytesss", + .conf = "AD4FF904D34E555384B14100FC465F88", + .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05", + .usage = 2, + .ct = "9C6DE75F812DE7ED0D28B2963557A115640998275B0AF5152709913FF52A2A9C8E63B872F92E64C839", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 13 plain", + .plain = "'13 bytes byte", + .conf = "CF9BCA6DF1144E0C0AF9B8F34C90D514", + .K0 = "B038B132CD8E06612267FAB7170066D88AECCBA0B744BFC60DC89BCA182D0715", + .usage = 3, + .ct = "EEEC85A9813CDC536772AB9B42DEFC5706F726E975DDE05A87EB5406EA324CA185C9986B42AABE794B84821BEE", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 30 plain", + .plain = "'30 bytes bytes bytes bytes byt", + .conf = "644DEF38DA35007275878D216855E228", + .K0 = "CCFCD349BF4C6677E86E4B02B8EAB924A546AC731CF9BF6989B996E7D6BFBBA7", + .usage = 4, + .ct = "0E44680985855F2D1F1812529CA83BFD8E349DE6FD9ADA0BAAA048D68E265FEBF34AD1255A344999AD37146887A6C6845731AC7F46376A0504CD06571474", + }, + {/* END */} +}; + +/* + * Checksum generation tests. 
+ */ +const struct krb5_mic_test krb5_mic_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "mic", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .Kc = "B31A018A48F54776F403E9A396325DC3", + .mic = "D78367186643D67B411CBA9139FC1DEE", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "mic", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .Kc = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D", + .mic = "45EE791567EEFCA37F4AC1E0222DE80D43C3BFA06699672A", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "mic abc", + .plain = "'abcdefghijk", + .K0 = "1DC46A8D763F4F93742BCBA3387576C3", + .usage = 7, + .mic = "1178E6C5C47A8C1AE0C4B9C7D4EB7B6B", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "mic ABC", + .plain = "'ABCDEFGHIJKLMNOPQRSTUVWXYZ", + .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C", + .usage = 8, + .mic = "D1B34F7004A731F23A0C00BF6C3F753A", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "mic 123", + .plain = "'123456789", + .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B", + .usage = 9, + .mic = "87A12CFD2B96214810F01C826E7744B1", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "mic !@#", + .plain = "'!@#$%^&*()!@#$%^&*()!@#$%^&*()", + .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05", + .usage = 10, + .mic = "3FA0B42355E52B189187294AA252AB64", + }, + {/* END */} +}; diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c new file mode 100644 index 000000000000..a1de55994d92 --- /dev/null +++ b/crypto/krb5enc.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AEAD wrapper for Kerberos 5 RFC3961 simplified profile. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * Derived from authenc: + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/authenc.h> +#include <crypto/scatterwalk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct krb5enc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct krb5enc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; +}; + +struct krb5enc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[]; +}; + +static void krb5enc_request_complete(struct aead_request *req, int err) +{ + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + +/** + * crypto_krb5enc_extractkeys - Extract Ke and Ki keys from the key blob. + * @keys: Where to put the key sizes and pointers + * @key: Encoded key material + * @keylen: Amount of key material + * + * Decode the key blob we're given. It starts with an rtattr that indicates + * the format and the length. Format CRYPTO_AUTHENC_KEYA_PARAM is: + * + * rtattr || __be32 enckeylen || authkey || enckey + * + * Note that the rtattr is in cpu-endian form, unlike enckeylen. This must be + * handled correctly in static testmgr data. 
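As a concrete illustration of the blob format described above, a caller would pack the two keys as follows. This is a sketch only; the helper name is made up, but CRYPTO_AUTHENC_KEYA_PARAM, the RTA_*() macros and struct crypto_authenc_key_param are the real interfaces the extraction code below consumes:

	static void *pack_krb5enc_key(const u8 *authkey, unsigned int authkeylen,
				      const u8 *enckey, unsigned int enckeylen,
				      unsigned int *_blob_len)
	{
		unsigned int len = RTA_SPACE(sizeof(struct crypto_authenc_key_param)) +
			authkeylen + enckeylen;
		struct crypto_authenc_key_param *param;
		struct rtattr *rta;
		u8 *blob, *p;

		blob = kmalloc(len, GFP_KERNEL);
		if (!blob)
			return NULL;

		rta = (struct rtattr *)blob;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen);

		/* Keys follow the rtattr: authentication key first. */
		p = blob + RTA_SPACE(sizeof(*param));
		memcpy(p, authkey, authkeylen);
		memcpy(p + authkeylen, enckey, enckeylen);
		*_blob_len = len;
		return blob;
	}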
+ */ +int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen) +{ + struct rtattr *rta = (struct rtattr *)key; + struct crypto_authenc_key_param *param; + + if (!RTA_OK(rta, keylen)) + return -EINVAL; + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + return -EINVAL; + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. + */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) + return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); + + param = RTA_DATA(rta); + keys->enckeylen = be32_to_cpu(param->enckeylen); + + key += rta->rta_len; + keylen -= rta->rta_len; + + if (keylen < keys->enckeylen) + return -EINVAL; + + keys->authkeylen = keylen - keys->enckeylen; + keys->authkey = key; + keys->enckey = key + keys->authkeylen; + return 0; +} +EXPORT_SYMBOL(crypto_krb5enc_extractkeys); + +static int krb5enc_setkey(struct crypto_aead *krb5enc, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct crypto_skcipher *enc = ctx->enc; + struct crypto_ahash *auth = ctx->auth; + unsigned int flags = crypto_aead_get_flags(krb5enc); + int err = -EINVAL; + + if (crypto_krb5enc_extractkeys(&keys, key, keylen) != 0) + goto out; + + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(auth, flags & CRYPTO_TFM_REQ_MASK); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); + if (err) + goto out; + + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(enc, flags & CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); +out: + memzero_explicit(&keys, sizeof(keys)); + return err; +} + +static void krb5enc_encrypt_done(void *data, int err) +{ + struct aead_request *req = data; + + krb5enc_request_complete(req, err); +} + +/* + * Start the encryption of the plaintext. We skip over the associated data as + * that only gets included in the hash. + */ +static int krb5enc_dispatch_encrypt(struct aead_request *req, + unsigned int flags) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct crypto_skcipher *enc = ctx->enc; + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); + if (req->src == req->dst) + dst = src; + else + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); + + skcipher_request_set_tfm(skreq, enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + krb5enc_encrypt_done, req); + skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv); + + return crypto_skcipher_encrypt(skreq); +} + +/* + * Insert the hash into the checksum field in the destination buffer directly + * after the encrypted region. 
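Pieced together from krb5enc_insert_checksum() and the set_crypt calls, the encrypt path produces this buffer geometry (a sketch, not normative):

	/*
	 *           req->assoclen      req->cryptlen     authsize
	 *         <---------------> <-----------------> <-------->
	 *   dst: [ associated data |  E(Ke, plaintext) | checksum ]
	 *
	 * where checksum = H(Ki, associated data || plaintext): the hash is
	 * taken over the source buffer before encryption and then copied in
	 * at offset req->assoclen + req->cryptlen.
	 */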
+ */ +static void krb5enc_insert_checksum(struct aead_request *req, u8 *hash) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + + scatterwalk_map_and_copy(hash, req->dst, + req->assoclen + req->cryptlen, + crypto_aead_authsize(krb5enc), 1); +} + +/* + * Upon completion of an asynchronous digest, transfer the hash to the checksum + * field. + */ +static void krb5enc_encrypt_ahash_done(void *data, int err) +{ + struct aead_request *req = data; + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + + if (err) + return krb5enc_request_complete(req, err); + + krb5enc_insert_checksum(req, ahreq->result); + + err = krb5enc_dispatch_encrypt(req, 0); + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + +/* + * Start the digest of the plaintext for encryption. In theory, this could be + * run in parallel with the encryption, provided the src and dst buffers don't + * overlap. + */ +static int krb5enc_dispatch_encrypt_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct crypto_ahash *auth = ctx->auth; + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + u8 *hash = areq_ctx->tail; + int err; + + ahash_request_set_callback(ahreq, aead_request_flags(req), + krb5enc_encrypt_ahash_done, req); + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen); + + err = crypto_ahash_digest(ahreq); + if (err) + return err; + + krb5enc_insert_checksum(req, hash); + return 0; +} + +/* + * Process an encryption operation. We can perform the cipher and the hash in + * parallel, provided the src and dst buffers are separate. + */ +static int krb5enc_encrypt(struct aead_request *req) +{ + int err; + + err = krb5enc_dispatch_encrypt_hash(req); + if (err < 0) + return err; + + return krb5enc_dispatch_encrypt(req, aead_request_flags(req)); +} + +static int krb5enc_verify_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + unsigned int authsize = crypto_aead_authsize(krb5enc); + u8 *calc_hash = areq_ctx->tail; + u8 *msg_hash = areq_ctx->tail + authsize; + + scatterwalk_map_and_copy(msg_hash, req->src, ahreq->nbytes, authsize, 0); + + if (crypto_memneq(msg_hash, calc_hash, authsize)) + return -EBADMSG; + return 0; +} + +static void krb5enc_decrypt_hash_done(void *data, int err) +{ + struct aead_request *req = data; + + if (err) + return krb5enc_request_complete(req, err); + + err = krb5enc_verify_hash(req); + krb5enc_request_complete(req, err); +} + +/* + * Dispatch the hashing of the plaintext after we've done the decryption. 
+ */ +static int krb5enc_dispatch_decrypt_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + struct crypto_ahash *auth = ctx->auth; + unsigned int authsize = crypto_aead_authsize(krb5enc); + u8 *hash = areq_ctx->tail; + int err; + + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, req->dst, hash, + req->assoclen + req->cryptlen - authsize); + ahash_request_set_callback(ahreq, aead_request_flags(req), + krb5enc_decrypt_hash_done, req); + + err = crypto_ahash_digest(ahreq); + if (err < 0) + return err; + + return krb5enc_verify_hash(req); +} + +/* + * Dispatch the decryption of the ciphertext. + */ +static int krb5enc_dispatch_decrypt(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); + unsigned int authsize = crypto_aead_authsize(krb5enc); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); + dst = src; + + if (req->src != req->dst) + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); + + skcipher_request_set_tfm(skreq, ctx->enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + req->base.complete, req->base.data); + skcipher_request_set_crypt(skreq, src, dst, + req->cryptlen - authsize, req->iv); + + return crypto_skcipher_decrypt(skreq); +} + +static int krb5enc_decrypt(struct aead_request *req) +{ + int err; + + err = krb5enc_dispatch_decrypt(req); + if (err < 0) + return err; + + return krb5enc_dispatch_decrypt_hash(req); +} + +static int krb5enc_init_tfm(struct crypto_aead *tfm) +{ + struct aead_instance *inst = aead_alg_instance(tfm); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + int err; + + auth = crypto_spawn_ahash(&ictx->auth); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + enc = crypto_spawn_skcipher(&ictx->enc); + err = PTR_ERR(enc); + if (IS_ERR(enc)) + goto err_free_ahash; + + ctx->auth = auth; + ctx->enc = enc; + + crypto_aead_set_reqsize( + tfm, + sizeof(struct krb5enc_request_ctx) + + ictx->reqoff + /* Space for two checksums */ + umax(sizeof(struct ahash_request) + crypto_ahash_reqsize(auth), + sizeof(struct skcipher_request) + crypto_skcipher_reqsize(enc))); + + return 0; + +err_free_ahash: + crypto_free_ahash(auth); + return err; +} + +static void krb5enc_exit_tfm(struct crypto_aead *tfm) +{ + struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_ahash(ctx->auth); + crypto_free_skcipher(ctx->enc); +} + +static void krb5enc_free(struct aead_instance *inst) +{ + struct krb5enc_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_skcipher(&ctx->enc); + crypto_drop_ahash(&ctx->auth); + kfree(inst); +} + +/* + * Create an instance of a template for a specific hash and cipher pair. 
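Instantiation follows the usual template syntax: the first parameter names the ahash, the second the skcipher. A minimal user sketch; the concrete algorithm string is illustrative (the intended in-kernel user is the krb5 library), and blob/blob_len are assumed to come from the packing sketch earlier:

	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead("krb5enc(cmac(camellia),cts(cbc(camellia)))",
				 0, 0);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	err = crypto_aead_setkey(aead, blob, blob_len);
	if (!err)
		err = crypto_aead_setauthsize(aead, 16); /* tag length; enctype-specific */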
+ */ +static int krb5enc_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct krb5enc_instance_ctx *ictx; + struct skcipher_alg_common *enc; + struct hash_alg_common *auth; + struct aead_instance *inst; + struct crypto_alg *auth_base; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) { + pr_err("attr_type failed\n"); + return err; + } + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + ictx = aead_instance_ctx(inst); + + err = crypto_grab_ahash(&ictx->auth, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) { + pr_err("grab ahash failed\n"); + goto err_free_inst; + } + auth = crypto_spawn_ahash_alg(&ictx->auth); + auth_base = &auth->base; + + err = crypto_grab_skcipher(&ictx->enc, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[2]), 0, mask); + if (err) { + pr_err("grab skcipher failed\n"); + goto err_free_inst; + } + enc = crypto_spawn_skcipher_alg_common(&ictx->enc); + + ictx->reqoff = 2 * auth->digestsize; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "krb5enc(%s,%s)", auth_base->cra_name, + enc->base.cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "krb5enc(%s,%s)", auth_base->cra_driver_name, + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_priority = enc->base.cra_priority * 10 + + auth_base->cra_priority; + inst->alg.base.cra_blocksize = enc->base.cra_blocksize; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; + inst->alg.base.cra_ctxsize = sizeof(struct krb5enc_ctx); + + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; + inst->alg.maxauthsize = auth->digestsize; + + inst->alg.init = krb5enc_init_tfm; + inst->alg.exit = krb5enc_exit_tfm; + + inst->alg.setkey = krb5enc_setkey; + inst->alg.encrypt = krb5enc_encrypt; + inst->alg.decrypt = krb5enc_decrypt; + + inst->free = krb5enc_free; + + err = aead_register_instance(tmpl, inst); + if (err) { + pr_err("ref failed\n"); + goto err_free_inst; + } + + return 0; + +err_free_inst: + krb5enc_free(inst); + return err; +} + +static struct crypto_template crypto_krb5enc_tmpl = { + .name = "krb5enc", + .create = krb5enc_create, + .module = THIS_MODULE, +}; + +static int __init crypto_krb5enc_module_init(void) +{ + return crypto_register_template(&crypto_krb5enc_tmpl); +} + +static void __exit crypto_krb5enc_module_exit(void) +{ + crypto_unregister_template(&crypto_krb5enc_tmpl); +} + +module_init(crypto_krb5enc_module_init); +module_exit(crypto_krb5enc_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple AEAD wrapper for Kerberos 5 RFC3961"); +MODULE_ALIAS_CRYPTO("krb5enc"); diff --git a/crypto/lrw.c b/crypto/lrw.c index e216fbf2b786..dd403b800513 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) while (w.nbytes) { unsigned int avail = w.nbytes; - be128 *wsrc; + const be128 *wsrc; be128 *wdst; wsrc = w.src.virt.addr; @@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -356,7 
+356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); @@ -420,7 +420,7 @@ static void __exit lrw_module_exit(void) crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(lrw_module_init); +module_init(lrw_module_init); module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index 0f1bd7dcde24..c2e2c38b5aa8 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -29,25 +29,6 @@ static inline struct lskcipher_alg *__crypto_lskcipher_alg( return container_of(alg, struct lskcipher_alg, co.base); } -static inline struct crypto_istat_cipher *lskcipher_get_stat( - struct lskcipher_alg *alg) -{ - return skcipher_get_stat_common(&alg->co); -} - -static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err) -{ - struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); - - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err) - atomic64_inc(&istat->err_cnt); - - return err; -} - static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm, const u8 *key, unsigned int keylen) { @@ -147,20 +128,13 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src, u32 flags)) { unsigned long alignmask = crypto_lskcipher_alignmask(tfm); - struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); - int ret; if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & - alignmask) { - ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, - crypt); - goto out; - } + alignmask) + return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, + crypt); - ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); - -out: - return crypto_lskcipher_errstat(alg, ret); + return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); } int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, @@ -168,13 +142,6 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, { struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); - - atomic64_inc(&istat->encrypt_cnt); - atomic64_add(len, &istat->encrypt_tlen); - } - return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); } EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt); @@ -184,13 +151,6 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, { struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); - - atomic64_inc(&istat->decrypt_cnt); - atomic64_add(len, &istat->decrypt_tlen); - } - return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); } EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); @@ -320,28 +280,6 @@ static int __maybe_unused crypto_lskcipher_report( sizeof(rblkcipher), &rblkcipher); } -static int __maybe_unused crypto_lskcipher_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); - struct crypto_istat_cipher *istat; - struct crypto_stat_cipher rcipher; - - istat = lskcipher_get_stat(skcipher); - - memset(&rcipher, 0, sizeof(rcipher)); - - strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); - - rcipher.stat_encrypt_cnt = 
atomic64_read(&istat->encrypt_cnt); - rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); - rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); - rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); - rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); -} - static const struct crypto_type crypto_lskcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_lskcipher_init_tfm, @@ -352,13 +290,11 @@ static const struct crypto_type crypto_lskcipher_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_lskcipher_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_lskcipher_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_LSKCIPHER, .tfmsize = offsetof(struct crypto_lskcipher, base), + .algsize = offsetof(struct lskcipher_alg, co.base), }; static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm) diff --git a/crypto/lz4.c b/crypto/lz4.c index 0606f8862e78..7a984ae5ae52 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c @@ -12,11 +12,7 @@ #include <linux/lz4.h> #include <crypto/internal/scompress.h> -struct lz4_ctx { - void *lz4_comp_mem; -}; - -static void *lz4_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4_alloc_ctx(void) { void *ctx; @@ -27,29 +23,11 @@ static void *lz4_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lz4_init(struct crypto_tfm *tfm) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); - if (IS_ERR(ctx->lz4_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4_free_ctx(void *ctx) { vfree(ctx); } -static void lz4_exit(struct crypto_tfm *tfm) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - lz4_free_ctx(NULL, ctx->lz4_comp_mem); -} - static int __lz4_compress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -70,14 +48,6 @@ static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src, return __lz4_compress_crypto(src, slen, dst, dlen, ctx); } -static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem); -} - static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -97,26 +67,6 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); } -static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); -} - -static struct crypto_alg alg_lz4 = { - .cra_name = "lz4", - .cra_driver_name = "lz4-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lz4_ctx), - .cra_module = THIS_MODULE, - .cra_init = lz4_init, - .cra_exit = lz4_exit, - .cra_u = { .compress = { - .coa_compress = lz4_compress_crypto, - .coa_decompress = lz4_decompress_crypto } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lz4_alloc_ctx, .free_ctx = lz4_free_ctx, @@ -131,28 +81,15 @@ static struct scomp_alg scomp = { static int __init lz4_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg_lz4); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if 
(ret) { - crypto_unregister_alg(&alg_lz4); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lz4_mod_fini(void) { - crypto_unregister_alg(&alg_lz4); crypto_unregister_scomp(&scomp); } -subsys_initcall(lz4_mod_init); +module_init(lz4_mod_init); module_exit(lz4_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index d7cc94aa2fcf..9c61d05b6214 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c @@ -4,18 +4,13 @@ * * Copyright (c) 2013 Chanho Min <chanho.min@lge.com> */ +#include <crypto/internal/scompress.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/lz4.h> -#include <crypto/internal/scompress.h> - -struct lz4hc_ctx { - void *lz4hc_comp_mem; -}; -static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4hc_alloc_ctx(void) { void *ctx; @@ -26,29 +21,11 @@ static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lz4hc_init(struct crypto_tfm *tfm) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); - if (IS_ERR(ctx->lz4hc_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4hc_free_ctx(void *ctx) { vfree(ctx); } -static void lz4hc_exit(struct crypto_tfm *tfm) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); -} - static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -69,16 +46,6 @@ static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src, return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx); } -static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lz4hc_compress_crypto(src, slen, dst, dlen, - ctx->lz4hc_comp_mem); -} - static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -98,26 +65,6 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); } -static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); -} - -static struct crypto_alg alg_lz4hc = { - .cra_name = "lz4hc", - .cra_driver_name = "lz4hc-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lz4hc_ctx), - .cra_module = THIS_MODULE, - .cra_init = lz4hc_init, - .cra_exit = lz4hc_exit, - .cra_u = { .compress = { - .coa_compress = lz4hc_compress_crypto, - .coa_decompress = lz4hc_decompress_crypto } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lz4hc_alloc_ctx, .free_ctx = lz4hc_free_ctx, @@ -132,28 +79,15 @@ static struct scomp_alg scomp = { static int __init lz4hc_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg_lz4hc); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg_lz4hc); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lz4hc_mod_fini(void) { - crypto_unregister_alg(&alg_lz4hc); crypto_unregister_scomp(&scomp); } -subsys_initcall(lz4hc_mod_init); +module_init(lz4hc_mod_init); module_exit(lz4hc_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lzo-rle.c 
b/crypto/lzo-rle.c index 0631d975bfac..ba013f2d5090 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -3,19 +3,13 @@ * Cryptographic API. */ +#include <crypto/internal/scompress.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> #include <linux/lzo.h> -#include <crypto/internal/scompress.h> - -struct lzorle_ctx { - void *lzorle_comp_mem; -}; +#include <linux/module.h> +#include <linux/slab.h> -static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) +static void *lzorle_alloc_ctx(void) { void *ctx; @@ -26,36 +20,18 @@ static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lzorle_init(struct crypto_tfm *tfm) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL); - if (IS_ERR(ctx->lzorle_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzorle_free_ctx(void *ctx) { kvfree(ctx); } -static void lzorle_exit(struct crypto_tfm *tfm) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - lzorle_free_ctx(NULL, ctx->lzorle_comp_mem); -} - static int __lzorle_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,14 +40,6 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, return 0; } -static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem); -} - static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -94,12 +62,6 @@ static int __lzorle_decompress(const u8 *src, unsigned int slen, return 0; } -static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - return __lzorle_decompress(src, slen, dst, dlen); -} - static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -107,19 +69,6 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lzorle_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "lzo-rle", - .cra_driver_name = "lzo-rle-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lzorle_ctx), - .cra_module = THIS_MODULE, - .cra_init = lzorle_init, - .cra_exit = lzorle_exit, - .cra_u = { .compress = { - .coa_compress = lzorle_compress, - .coa_decompress = lzorle_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lzorle_alloc_ctx, .free_ctx = lzorle_free_ctx, @@ -134,28 +83,15 @@ static struct scomp_alg scomp = { static int __init lzorle_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lzorle_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } -subsys_initcall(lzorle_mod_init); +module_init(lzorle_mod_init); module_exit(lzorle_mod_fini); 
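With the legacy crypto_comp entry points gone from these compressors, callers reach them through the acomp interface, which wraps scomp transparently. A minimal synchronous compression sketch, with src_sg/dst_sg and slen/dlen supplied by the caller:

	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("lzo", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);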
MODULE_LICENSE("GPL"); diff --git a/crypto/lzo.c b/crypto/lzo.c index ebda132dd22b..7867e2c67c4e 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -3,19 +3,13 @@ * Cryptographic API. */ +#include <crypto/internal/scompress.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> #include <linux/lzo.h> -#include <crypto/internal/scompress.h> - -struct lzo_ctx { - void *lzo_comp_mem; -}; +#include <linux/module.h> +#include <linux/slab.h> -static void *lzo_alloc_ctx(struct crypto_scomp *tfm) +static void *lzo_alloc_ctx(void) { void *ctx; @@ -26,36 +20,18 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lzo_init(struct crypto_tfm *tfm) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); - if (IS_ERR(ctx->lzo_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzo_free_ctx(void *ctx) { kvfree(ctx); } -static void lzo_exit(struct crypto_tfm *tfm) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - lzo_free_ctx(NULL, ctx->lzo_comp_mem); -} - static int __lzo_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,14 +40,6 @@ static int __lzo_compress(const u8 *src, unsigned int slen, return 0; } -static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem); -} - static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -94,12 +62,6 @@ static int __lzo_decompress(const u8 *src, unsigned int slen, return 0; } -static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - return __lzo_decompress(src, slen, dst, dlen); -} - static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -107,19 +69,6 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lzo_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "lzo", - .cra_driver_name = "lzo-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lzo_ctx), - .cra_module = THIS_MODULE, - .cra_init = lzo_init, - .cra_exit = lzo_exit, - .cra_u = { .compress = { - .coa_compress = lzo_compress, - .coa_decompress = lzo_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lzo_alloc_ctx, .free_ctx = lzo_free_ctx, @@ -134,28 +83,15 @@ static struct scomp_alg scomp = { static int __init lzo_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lzo_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } -subsys_initcall(lzo_mod_init); +module_init(lzo_mod_init); module_exit(lzo_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/md4.c b/crypto/md4.c index 2e7f2f319f95..55bf47e23c13 100644 --- 
a/crypto/md4.c +++ b/crypto/md4.c @@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(md4_mod_init); +module_init(md4_mod_init); module_exit(md4_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/md5.c b/crypto/md5.c index 72c0c46fb5ee..32c0819f5118 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -17,11 +17,9 @@ */ #include <crypto/internal/hash.h> #include <crypto/md5.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -#include <linux/types.h> -#include <asm/byteorder.h> const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, @@ -120,10 +118,11 @@ static void md5_transform(__u32 *hash, __u32 const *in) hash[3] += d; } -static inline void md5_transform_helper(struct md5_state *ctx) +static inline void md5_transform_helper(struct md5_state *ctx, + u32 block[MD5_BLOCK_WORDS]) { - le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); - md5_transform(ctx->hash, ctx->block); + le32_to_cpu_array(block, MD5_BLOCK_WORDS); + md5_transform(ctx->hash, block); } static int md5_init(struct shash_desc *desc) @@ -142,76 +141,53 @@ static int md5_init(struct shash_desc *desc) static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *mctx = shash_desc_ctx(desc); - const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + u32 block[MD5_BLOCK_WORDS]; mctx->byte_count += len; - - if (avail > len) { - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, len); - return 0; - } - - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, avail); - - md5_transform_helper(mctx); - data += avail; - len -= avail; - - while (len >= sizeof(mctx->block)) { - memcpy(mctx->block, data, sizeof(mctx->block)); - md5_transform_helper(mctx); - data += sizeof(mctx->block); - len -= sizeof(mctx->block); - } - - memcpy(mctx->block, data, len); - - return 0; + do { + memcpy(block, data, sizeof(block)); + md5_transform_helper(mctx, block); + data += sizeof(block); + len -= sizeof(block); + } while (len >= sizeof(block)); + memzero_explicit(block, sizeof(block)); + mctx->byte_count -= len; + return len; } -static int md5_final(struct shash_desc *desc, u8 *out) +static int md5_finup(struct shash_desc *desc, const u8 *data, unsigned int len, + u8 *out) { struct md5_state *mctx = shash_desc_ctx(desc); - const unsigned int offset = mctx->byte_count & 0x3f; - char *p = (char *)mctx->block + offset; - int padding = 56 - (offset + 1); + u32 block[MD5_BLOCK_WORDS]; + unsigned int offset; + int padding; + char *p; + + memcpy(block, data, len); + + offset = len; + p = (char *)block + offset; + padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (u64)); - md5_transform_helper(mctx); - p = (char *)mctx->block; + md5_transform_helper(mctx, block); + p = (char *)block; padding = 56; } memset(p, 0, padding); - mctx->block[14] = mctx->byte_count << 3; - mctx->block[15] = mctx->byte_count >> 29; - le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - - sizeof(u64)) / sizeof(u32)); - md5_transform(mctx->hash, mctx->block); + mctx->byte_count += len; + block[14] = mctx->byte_count << 3; + block[15] = mctx->byte_count >> 29; + le32_to_cpu_array(block, (sizeof(block) - sizeof(u64)) / sizeof(u32)); + md5_transform(mctx->hash, block); + memzero_explicit(block, sizeof(block)); cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); memcpy(out, mctx->hash, 
sizeof(mctx->hash)); - memset(mctx, 0, sizeof(*mctx)); - - return 0; -} - -static int md5_export(struct shash_desc *desc, void *out) -{ - struct md5_state *ctx = shash_desc_ctx(desc); - - memcpy(out, ctx, sizeof(*ctx)); - return 0; -} - -static int md5_import(struct shash_desc *desc, const void *in) -{ - struct md5_state *ctx = shash_desc_ctx(desc); - memcpy(ctx, in, sizeof(*ctx)); return 0; } @@ -219,14 +195,12 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_init, .update = md5_update, - .final = md5_final, - .export = md5_export, - .import = md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .finup = md5_finup, + .descsize = MD5_STATE_SIZE, .base = { .cra_name = "md5", .cra_driver_name = "md5-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -242,7 +216,7 @@ static void __exit md5_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(md5_mod_init); +module_init(md5_mod_init); module_exit(md5_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index f4c31049601c..69ad35f524d7 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -7,7 +7,7 @@ * Copyright (c) 2004 Jouni Malinen <j@w1.fi> */ #include <crypto/internal/hash.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> @@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void) } -subsys_initcall(michael_mic_init); +module_init(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c index 8a3006c3b51b..2b648615b5ec 100644 --- a/crypto/nhpoly1305.c +++ b/crypto/nhpoly1305.c @@ -30,7 +30,7 @@ * (https://cr.yp.to/mac/poly1305-20050329.pdf) */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/algapi.h> #include <crypto/internal/hash.h> #include <crypto/internal/poly1305.h> @@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void) crypto_unregister_shash(&nhpoly1305_alg); } -subsys_initcall(nhpoly1305_mod_init); +module_init(nhpoly1305_mod_init); module_exit(nhpoly1305_mod_exit); MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function"); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index ab469ba50c13..d092717ea4fc 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, struct crypto_cipher *tfm) { int bsize = crypto_cipher_blocksize(tfm); + const u8 *src = walk->src.virt.addr; unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; @@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, { int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE]; do { - memcpy(tmpbuf, src, bsize); - crypto_xor(iv, src, bsize); - crypto_cipher_encrypt_one(tfm, src, iv); - crypto_xor_cpy(iv, tmpbuf, src, bsize); + memcpy(tmpbuf, dst, bsize); + crypto_xor(iv, dst, bsize); + crypto_cipher_encrypt_one(tfm, dst, iv); + crypto_xor_cpy(iv, tmpbuf, dst, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; @@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, struct 
crypto_cipher *tfm) { int bsize = crypto_cipher_blocksize(tfm); + const u8 *src = walk->src.virt.addr; unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; @@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, { int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32)); do { - memcpy(tmpbuf, src, bsize); - crypto_cipher_decrypt_one(tfm, src, src); - crypto_xor(src, iv, bsize); - crypto_xor_cpy(iv, src, tmpbuf, bsize); + memcpy(tmpbuf, dst, bsize); + crypto_cipher_decrypt_one(tfm, dst, dst); + crypto_xor(dst, iv, bsize); + crypto_xor_cpy(iv, dst, tmpbuf, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; @@ -186,10 +186,10 @@ static void __exit crypto_pcbc_module_exit(void) crypto_unregister_template(&crypto_pcbc_tmpl); } -subsys_initcall(crypto_pcbc_module_init); +module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("pcbc"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index d0d954fe9d54..c33d29a523e0 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -117,8 +117,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req) err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; - if (err == -EBUSY) - return -EAGAIN; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_encrypt(creq); + } return err; } @@ -166,8 +168,10 @@ static int pcrypt_aead_decrypt(struct aead_request *req) err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; - if (err == -EBUSY) - return -EAGAIN; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_decrypt(creq); + } return err; } @@ -377,7 +381,7 @@ static void __exit pcrypt_exit(void) kset_unregister(pcrypt_kset); } -subsys_initcall(pcrypt_init); +module_init(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c deleted file mode 100644 index 94af47eb6fa6..000000000000 --- a/crypto/poly1305_generic.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Poly1305 authenticator algorithm, RFC7539 - * - * Copyright (C) 2015 Martin Willi - * - * Based on public domain code by Andrew Moon and Daniel J. Bernstein. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
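For reference, the mode the pcbc walkers above implement, as worked equations; the in-place variants read and write dst because, once the walk starts, source and destination coincide:

	encrypt:  C_i = E_K(P_i xor V_i),   V_{i+1} = P_i xor C_i,   V_1 = IV
	decrypt:  P_i = D_K(C_i) xor V_i,   V_{i+1} = P_i xor C_i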
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <asm/unaligned.h> - -static int crypto_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - poly1305_core_init(&dctx->h); - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) -{ - if (!dctx->sset) { - if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { - poly1305_core_setkey(&dctx->core_r, src); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->rset = 2; - } - if (srclen >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return srclen; -} - -static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) -{ - unsigned int datalen; - - if (unlikely(!dctx->sset)) { - datalen = crypto_poly1305_setdesckey(dctx, src, srclen); - src += srclen - datalen; - srclen = datalen; - } - - poly1305_core_blocks(&dctx->h, &dctx->core_r, src, - srclen / POLY1305_BLOCK_SIZE, 1); -} - -static int crypto_poly1305_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - srclen -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks(dctx, dctx->buf, - POLY1305_BLOCK_SIZE); - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { - poly1305_blocks(dctx, src, srclen); - src += srclen - (srclen % POLY1305_BLOCK_SIZE); - srclen %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } - - return 0; -} - -static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_generic(dctx, dst); - return 0; -} - -static struct shash_alg poly1305_alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_init, - .update = crypto_poly1305_update, - .final = crypto_poly1305_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-generic", - .cra_priority = 100, - .cra_blocksize = POLY1305_BLOCK_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -static int __init poly1305_mod_init(void) -{ - return crypto_register_shash(&poly1305_alg); -} - -static void __exit poly1305_mod_exit(void) -{ - crypto_unregister_shash(&poly1305_alg); -} - -subsys_initcall(poly1305_mod_init); -module_exit(poly1305_mod_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("Poly1305 authenticator"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-generic"); diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c index 16bfa6925b31..db8adb56e4ca 100644 --- a/crypto/polyval-generic.c +++ b/crypto/polyval-generic.c @@ 
-44,15 +44,15 @@ * */ -#include <asm/unaligned.h> -#include <crypto/algapi.h> #include <crypto/gf128mul.h> -#include <crypto/polyval.h> #include <crypto/internal/hash.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/polyval.h> +#include <crypto/utils.h> +#include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> struct polyval_tfm_ctx { struct gf128mul_4k *gf128; @@ -63,7 +63,6 @@ struct polyval_desc_ctx { u8 buffer[POLYVAL_BLOCK_SIZE]; be128 buffer128; }; - u32 bytes; }; static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE], @@ -76,46 +75,6 @@ static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE], put_unaligned(swab64(b), (u64 *)&dst[0]); } -/* - * Performs multiplication in the POLYVAL field using the GHASH field as a - * subroutine. This function is used as a fallback for hardware accelerated - * implementations when simd registers are unavailable. - * - * Note: This function is not used for polyval-generic, instead we use the 4k - * lookup table implementation for finite field multiplication. - */ -void polyval_mul_non4k(u8 *op1, const u8 *op2) -{ - be128 a, b; - - // Assume one argument is in Montgomery form and one is not. - copy_and_reverse((u8 *)&a, op1); - copy_and_reverse((u8 *)&b, op2); - gf128mul_x_lle(&a, &a); - gf128mul_lle(&a, &b); - copy_and_reverse(op1, (u8 *)&a); -} -EXPORT_SYMBOL_GPL(polyval_mul_non4k); - -/* - * Perform a POLYVAL update using non4k multiplication. This function is used - * as a fallback for hardware accelerated implementations when simd registers - * are unavailable. - * - * Note: This function is not used for polyval-generic, instead we use the 4k - * lookup table implementation of finite field multiplication. 
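The identity the removed fallback helpers implemented, in worked form (rev() is byte reversal of a 16-byte block):

	POLYVAL(h, X_1, ..., X_n)
	    = rev(GHASH(rev(h) * x, rev(X_1), ..., rev(X_n)))

so polyval_mul_non4k() mapped both operands with rev(), applied the extra factor of x via gf128mul_x_lle(), multiplied with gf128mul_lle(), and mapped the result back.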
- */ -void polyval_update_non4k(const u8 *key, const u8 *in, - size_t nblocks, u8 *accumulator) -{ - while (nblocks--) { - crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE); - polyval_mul_non4k(accumulator, key); - in += POLYVAL_BLOCK_SIZE; - } -} -EXPORT_SYMBOL_GPL(polyval_update_non4k); - static int polyval_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -154,56 +113,53 @@ static int polyval_update(struct shash_desc *desc, { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm); - u8 *pos; u8 tmp[POLYVAL_BLOCK_SIZE]; - int n; - if (dctx->bytes) { - n = min(srclen, dctx->bytes); - pos = dctx->buffer + dctx->bytes - 1; - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos-- ^= *src++; - - if (!dctx->bytes) - gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); - } - - while (srclen >= POLYVAL_BLOCK_SIZE) { + do { copy_and_reverse(tmp, src); crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE); gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); src += POLYVAL_BLOCK_SIZE; srclen -= POLYVAL_BLOCK_SIZE; - } + } while (srclen >= POLYVAL_BLOCK_SIZE); - if (srclen) { - dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; - pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1; - while (srclen--) - *pos-- ^= *src++; + return srclen; +} + +static int polyval_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + + if (len) { + u8 tmp[POLYVAL_BLOCK_SIZE] = {}; + + memcpy(tmp, src, len); + polyval_update(desc, tmp, POLYVAL_BLOCK_SIZE); } + copy_and_reverse(dst, dctx->buffer); + return 0; +} + +static int polyval_export(struct shash_desc *desc, void *out) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + copy_and_reverse(out, dctx->buffer); return 0; } -static int polyval_final(struct shash_desc *desc, u8 *dst) +static int polyval_import(struct shash_desc *desc, const void *in) { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm); - if (dctx->bytes) - gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); - copy_and_reverse(dst, dctx->buffer); + copy_and_reverse(dctx->buffer, in); return 0; } -static void polyval_exit_tfm(struct crypto_tfm *tfm) +static void polyval_exit_tfm(struct crypto_shash *tfm) { - struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm); gf128mul_free_4k(ctx->gf128); } @@ -212,17 +168,21 @@ static struct shash_alg polyval_alg = { .digestsize = POLYVAL_DIGEST_SIZE, .init = polyval_init, .update = polyval_update, - .final = polyval_final, + .finup = polyval_finup, .setkey = polyval_setkey, + .export = polyval_export, + .import = polyval_import, + .exit_tfm = polyval_exit_tfm, + .statesize = sizeof(struct polyval_desc_ctx), .descsize = sizeof(struct polyval_desc_ctx), .base = { .cra_name = "polyval", .cra_driver_name = "polyval-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = POLYVAL_BLOCK_SIZE, .cra_ctxsize = sizeof(struct polyval_tfm_ctx), .cra_module = THIS_MODULE, - .cra_exit = polyval_exit_tfm, }, }; @@ -236,7 +196,7 @@ static void __exit polyval_mod_exit(void) crypto_unregister_shash(&polyval_alg); } -subsys_initcall(polyval_mod_init); +module_init(polyval_mod_init); module_exit(polyval_mod_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/proc.c b/crypto/proc.c index 56c7c78df297..82f15b967e85 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -47,13 +47,10 @@ static int 
c_show(struct seq_file *m, void *p) (alg->cra_flags & CRYPTO_ALG_TESTED) ? "passed" : "unknown"); seq_printf(m, "internal : %s\n", - (alg->cra_flags & CRYPTO_ALG_INTERNAL) ? - "yes" : "no"); - if (fips_enabled) { + str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL)); + if (fips_enabled) seq_printf(m, "fips : %s\n", - (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) ? - "no" : "yes"); - } + str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)); if (alg->cra_flags & CRYPTO_ALG_LARVAL) { seq_printf(m, "type : larval\n"); @@ -75,9 +72,6 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "max keysize : %u\n", alg->cra_cipher.cia_max_keysize); break; - case CRYPTO_ALG_TYPE_COMPRESS: - seq_printf(m, "type : compression\n"); - break; default: seq_printf(m, "type : unknown\n"); break; diff --git a/crypto/rmd160.c b/crypto/rmd160.c index c5fe4034b153..9860b60c9be4 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -9,18 +9,14 @@ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> */ #include <crypto/internal/hash.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <asm/byteorder.h> - +#include <linux/string.h> #include "ripemd.h" struct rmd160_ctx { u64 byte_count; u32 state[5]; - __le32 buffer[16]; }; #define K1 RMD_K1 @@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc) rctx->state[3] = RMD_H3; rctx->state[4] = RMD_H4; - memset(rctx->buffer, 0, sizeof(rctx->buffer)); - return 0; } static int rmd160_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + int remain = len - round_down(len, RMD160_BLOCK_SIZE); struct rmd160_ctx *rctx = shash_desc_ctx(desc); - const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); - - rctx->byte_count += len; + __le32 buffer[RMD160_BLOCK_SIZE / 4]; - /* Enough space in buffer? If so copy and we're done */ - if (avail > len) { - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, len); - goto out; - } - - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, avail); + rctx->byte_count += len - remain; - rmd160_transform(rctx->state, rctx->buffer); - data += avail; - len -= avail; - - while (len >= sizeof(rctx->buffer)) { - memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd160_transform(rctx->state, rctx->buffer); - data += sizeof(rctx->buffer); - len -= sizeof(rctx->buffer); - } + do { + memcpy(buffer, data, sizeof(buffer)); + rmd160_transform(rctx->state, buffer); + data += sizeof(buffer); + len -= sizeof(buffer); + } while (len >= sizeof(buffer)); - memcpy(rctx->buffer, data, len); - -out: - return 0; + memzero_explicit(buffer, sizeof(buffer)); + return remain; } /* Add padding and return the message digest. */ -static int rmd160_final(struct shash_desc *desc, u8 *out) +static int rmd160_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { + unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1; struct rmd160_ctx *rctx = shash_desc_ctx(desc); - u32 i, index, padlen; - __le64 bits; + union { + __le64 l64[RMD160_BLOCK_SIZE / 4]; + __le32 l32[RMD160_BLOCK_SIZE / 2]; + u8 u8[RMD160_BLOCK_SIZE * 2]; + } block = {}; __le32 *dst = (__le32 *)out; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_le64(rctx->byte_count << 3); - - /* Pad out to 56 mod 64 */ - index = rctx->byte_count & 0x3f; - padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); - rmd160_update(desc, padding, padlen); + u32 i; - /* Append length */ - rmd160_update(desc, (const u8 *)&bits, sizeof(bits)); + rctx->byte_count += len; + if (len >= bit_offset * 8) + bit_offset += RMD160_BLOCK_SIZE / 8; + memcpy(&block, src, len); + block.u8[len] = 0x80; + block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3); + + rmd160_transform(rctx->state, block.l32); + if (bit_offset > RMD160_BLOCK_SIZE / 8) + rmd160_transform(rctx->state, + block.l32 + RMD160_BLOCK_SIZE / 4); + memzero_explicit(&block, sizeof(block)); /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_le32p(&rctx->state[i]); - - /* Wipe context */ - memset(rctx, 0, sizeof(*rctx)); - return 0; } @@ -338,11 +321,12 @@ static struct shash_alg alg = { .digestsize = RMD160_DIGEST_SIZE, .init = rmd160_init, .update = rmd160_update, - .final = rmd160_final, + .finup = rmd160_finup, .descsize = sizeof(struct rmd160_ctx), .base = { .cra_name = "rmd160", .cra_driver_name = "rmd160-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = RMD160_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(rmd160_mod_init); +module_init(rmd160_mod_init); module_exit(rmd160_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/rng.c b/crypto/rng.c index 279dffdebf59..b8ae6ebc091d 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -30,30 +30,24 @@ static int crypto_default_rng_refcnt; int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { - struct rng_alg *alg = crypto_rng_alg(tfm); u8 *buf = NULL; int err; - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&rng_get_stat(alg)->seed_cnt); - if (!seed && slen) { buf = kmalloc(slen, GFP_KERNEL); - err = -ENOMEM; if (!buf) - goto out; + return -ENOMEM; err = get_random_bytes_wait(buf, slen); if (err) - goto free_buf; + goto out; seed = buf; } - err = alg->seed(tfm, seed, slen); -free_buf: - kfree_sensitive(buf); + err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); out: - return crypto_rng_errstat(alg, err); + kfree_sensitive(buf); + return err; } EXPORT_SYMBOL_GPL(crypto_rng_reset); @@ -91,27 +85,6 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "seedsize : %u\n", seedsize(alg)); } -static int __maybe_unused crypto_rng_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - struct rng_alg *rng = __crypto_rng_alg(alg); - struct crypto_istat_rng *istat; - struct crypto_stat_rng rrng; - - istat = rng_get_stat(rng); - - memset(&rrng, 0, sizeof(rrng)); - - strscpy(rrng.type, "rng", sizeof(rrng.type)); - - rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt); - rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen); - rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt); - rrng.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); -} - static const struct crypto_type crypto_rng_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_rng_init_tfm, @@ -121,13 +94,11 @@ static const struct crypto_type crypto_rng_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_rng_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_rng_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_RNG, .tfmsize = offsetof(struct crypto_rng, base), + .algsize = offsetof(struct rng_alg, base), }; struct crypto_rng 
*crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) @@ -199,7 +170,6 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng); int crypto_register_rng(struct rng_alg *alg) { - struct crypto_istat_rng *istat = rng_get_stat(alg); struct crypto_alg *base = &alg->base; if (alg->seedsize > PAGE_SIZE / 8) @@ -209,9 +179,6 @@ int crypto_register_rng(struct rng_alg *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_RNG; - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - memset(istat, 0, sizeof(*istat)); - return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_rng); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index cd501195f34a..50bdb18e7b48 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -16,101 +16,6 @@ #include <linux/random.h> #include <linux/scatterlist.h> -/* - * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2]. - */ -static const u8 rsa_digest_info_md5[] = { - 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, - 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */ - 0x05, 0x00, 0x04, 0x10 -}; - -static const u8 rsa_digest_info_sha1[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x0e, 0x03, 0x02, 0x1a, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 rsa_digest_info_rmd160[] = { - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, - 0x2b, 0x24, 0x03, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x14 -}; - -static const u8 rsa_digest_info_sha224[] = { - 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, - 0x05, 0x00, 0x04, 0x1c -}; - -static const u8 rsa_digest_info_sha256[] = { - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, - 0x05, 0x00, 0x04, 0x20 -}; - -static const u8 rsa_digest_info_sha384[] = { - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, - 0x05, 0x00, 0x04, 0x30 -}; - -static const u8 rsa_digest_info_sha512[] = { - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, - 0x05, 0x00, 0x04, 0x40 -}; - -static const u8 rsa_digest_info_sha3_256[] = { - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08, - 0x05, 0x00, 0x04, 0x20 -}; - -static const u8 rsa_digest_info_sha3_384[] = { - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09, - 0x05, 0x00, 0x04, 0x30 -}; - -static const u8 rsa_digest_info_sha3_512[] = { - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0A, - 0x05, 0x00, 0x04, 0x40 -}; - -static const struct rsa_asn1_template { - const char *name; - const u8 *data; - size_t size; -} rsa_asn1_templates[] = { -#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) } - _(md5), - _(sha1), - _(rmd160), - _(sha256), - _(sha384), - _(sha512), - _(sha224), -#undef _ -#define _(X) { "sha3-" #X, rsa_digest_info_sha3_##X, sizeof(rsa_digest_info_sha3_##X) } - _(256), - _(384), - _(512), -#undef _ - { NULL } -}; - -static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name) -{ - const struct rsa_asn1_template *p; - - for (p = rsa_asn1_templates; p->name; p++) - if (strcmp(name, p->name) == 0) - return p; - return NULL; -} - struct pkcs1pad_ctx { struct crypto_akcipher *child; unsigned int key_size; @@ -118,7 +23,6 @@ struct pkcs1pad_ctx { struct pkcs1pad_inst_ctx { struct crypto_akcipher_spawn spawn; - const struct rsa_asn1_template *digest_info; }; struct pkcs1pad_request { @@ -131,42 +35,16 @@ static int pkcs1pad_set_pub_key(struct 
crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - int err; - - ctx->key_size = 0; - - err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); - if (err) - return err; - - /* Find out new modulus size from rsa implementation */ - err = crypto_akcipher_maxsize(ctx->child); - if (err > PAGE_SIZE) - return -ENOTSUPP; - ctx->key_size = err; - return 0; + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen); } static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - int err; - - ctx->key_size = 0; - - err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); - if (err) - return err; - /* Find out new modulus size from rsa implementation */ - err = crypto_akcipher_maxsize(ctx->child); - if (err > PAGE_SIZE) - return -ENOTSUPP; - - ctx->key_size = err; - return 0; + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen); } static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) @@ -174,9 +52,9 @@ static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); /* - * The maximum destination buffer size for the encrypt/sign operations + * The maximum destination buffer size for the encrypt operation * will be the same as for RSA, even though it's smaller for - * decrypt/verify. + * decrypt. */ return ctx->key_size; @@ -194,7 +72,7 @@ static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, sg_chain(sg, nsegs, next); } -static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) +static int pkcs1pad_encrypt_complete(struct akcipher_request *req, int err) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); @@ -233,14 +111,14 @@ out: return err; } -static void pkcs1pad_encrypt_sign_complete_cb(void *data, int err) +static void pkcs1pad_encrypt_complete_cb(void *data, int err) { struct akcipher_request *req = data; if (err == -EINPROGRESS) goto out; - err = pkcs1pad_encrypt_sign_complete(req, err); + err = pkcs1pad_encrypt_complete(req, err); out: akcipher_request_complete(req, err); @@ -281,7 +159,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, - pkcs1pad_encrypt_sign_complete_cb, req); + pkcs1pad_encrypt_complete_cb, req); /* Reuse output buffer */ akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, @@ -289,7 +167,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) err = crypto_akcipher_encrypt(&req_ctx->child_req); if (err != -EINPROGRESS && err != -EBUSY) - return pkcs1pad_encrypt_sign_complete(req, err); + return pkcs1pad_encrypt_complete(req, err); return err; } @@ -394,195 +272,6 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) return err; } -static int pkcs1pad_sign(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - struct akcipher_instance *inst = akcipher_alg_instance(tfm); - struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); - const struct rsa_asn1_template *digest_info = ictx->digest_info; - int err; - unsigned int ps_end, digest_info_size = 0; - - if (!ctx->key_size) - return 
-EINVAL; - - if (digest_info) - digest_info_size = digest_info->size; - - if (req->src_len + digest_info_size > ctx->key_size - 11) - return -EOVERFLOW; - - if (req->dst_len < ctx->key_size) { - req->dst_len = ctx->key_size; - return -EOVERFLOW; - } - - req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, - GFP_KERNEL); - if (!req_ctx->in_buf) - return -ENOMEM; - - ps_end = ctx->key_size - digest_info_size - req->src_len - 2; - req_ctx->in_buf[0] = 0x01; - memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); - req_ctx->in_buf[ps_end] = 0x00; - - if (digest_info) - memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, - digest_info->size); - - pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, - ctx->key_size - 1 - req->src_len, req->src); - - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); - akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, - pkcs1pad_encrypt_sign_complete_cb, req); - - /* Reuse output buffer */ - akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, - req->dst, ctx->key_size - 1, req->dst_len); - - err = crypto_akcipher_decrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && err != -EBUSY) - return pkcs1pad_encrypt_sign_complete(req, err); - - return err; -} - -static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - struct akcipher_instance *inst = akcipher_alg_instance(tfm); - struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); - const struct rsa_asn1_template *digest_info = ictx->digest_info; - const unsigned int sig_size = req->src_len; - const unsigned int digest_size = req->dst_len; - unsigned int dst_len; - unsigned int pos; - u8 *out_buf; - - if (err) - goto done; - - err = -EINVAL; - dst_len = req_ctx->child_req.dst_len; - if (dst_len < ctx->key_size - 1) - goto done; - - out_buf = req_ctx->out_buf; - if (dst_len == ctx->key_size) { - if (out_buf[0] != 0x00) - /* Decrypted value had no leading 0 byte */ - goto done; - - dst_len--; - out_buf++; - } - - err = -EBADMSG; - if (out_buf[0] != 0x01) - goto done; - - for (pos = 1; pos < dst_len; pos++) - if (out_buf[pos] != 0xff) - break; - - if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) - goto done; - pos++; - - if (digest_info) { - if (digest_info->size > dst_len - pos) - goto done; - if (crypto_memneq(out_buf + pos, digest_info->data, - digest_info->size)) - goto done; - - pos += digest_info->size; - } - - err = 0; - - if (digest_size != dst_len - pos) { - err = -EKEYREJECTED; - req->dst_len = dst_len - pos; - goto done; - } - /* Extract appended digest. */ - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, sig_size + digest_size), - req_ctx->out_buf + ctx->key_size, - digest_size, sig_size); - /* Do the actual verification step. */ - if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos, - digest_size) != 0) - err = -EKEYREJECTED; -done: - kfree_sensitive(req_ctx->out_buf); - - return err; -} - -static void pkcs1pad_verify_complete_cb(void *data, int err) -{ - struct akcipher_request *req = data; - - if (err == -EINPROGRESS) - goto out; - - err = pkcs1pad_verify_complete(req, err); - -out: - akcipher_request_complete(req, err); -} - -/* - * The verify operation is here for completeness similar to the verification - * defined in RFC2313 section 10.2 except that block type 0 is not accepted, - * as in RFC2437. 
RFC2437 section 9.2 doesn't define any operation to - * retrieve the DigestInfo from a signature, instead the user is expected - * to call the sign operation to generate the expected signature and compare - * signatures instead of the message-digests. - */ -static int pkcs1pad_verify(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - const unsigned int sig_size = req->src_len; - const unsigned int digest_size = req->dst_len; - int err; - - if (WARN_ON(req->dst) || WARN_ON(!digest_size) || - !ctx->key_size || sig_size != ctx->key_size) - return -EINVAL; - - req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL); - if (!req_ctx->out_buf) - return -ENOMEM; - - pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, - ctx->key_size, NULL); - - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); - akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, - pkcs1pad_verify_complete_cb, req); - - /* Reuse input buffer, output to a new buffer */ - akcipher_request_set_crypt(&req_ctx->child_req, req->src, - req_ctx->out_sg, sig_size, ctx->key_size); - - err = crypto_akcipher_encrypt(&req_ctx->child_req); - if (err != -EINPROGRESS && err != -EBUSY) - return pkcs1pad_verify_complete(req, err); - - return err; -} - static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) { struct akcipher_instance *inst = akcipher_alg_instance(tfm); @@ -624,7 +313,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; struct akcipher_alg *rsa_alg; - const char *hash_name; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); @@ -650,36 +338,15 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) } err = -ENAMETOOLONG; - hash_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(hash_name)) { - if (snprintf(inst->alg.base.cra_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", - rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - if (snprintf(inst->alg.base.cra_driver_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", - rsa_alg->base.cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - } else { - ctx->digest_info = rsa_lookup_asn1(hash_name); - if (!ctx->digest_info) { - err = -EINVAL; - goto err_free_inst; - } - - if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, - hash_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - if (snprintf(inst->alg.base.cra_driver_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", - rsa_alg->base.cra_driver_name, - hash_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - } + if (snprintf(inst->alg.base.cra_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); @@ -689,8 +356,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = pkcs1pad_encrypt; inst->alg.decrypt = pkcs1pad_decrypt; - inst->alg.sign = pkcs1pad_sign; - inst->alg.verify = pkcs1pad_verify; inst->alg.set_pub_key = pkcs1pad_set_pub_key; 
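/*
 * Editorial note, not part of the patch: with pkcs1pad_sign() and
 * pkcs1pad_verify() gone, this template handles encryption and
 * decryption only.  PKCS#1 v1.5 signatures now go through the sig API
 * and the new "pkcs1" template added in crypto/rsassa-pkcs1.c below.
 * A minimal verification sketch, assuming the <crypto/sig.h>
 * interface (error handling elided):
 *
 *	struct crypto_sig *tfm;
 *
 *	tfm = crypto_alloc_sig("pkcs1(rsa,sha256)", 0, 0);
 *	crypto_sig_set_pubkey(tfm, key, keylen);
 *	err = crypto_sig_verify(tfm, sig, sig_len, digest, digest_len);
 *	crypto_free_sig(tfm);
 */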
inst->alg.set_priv_key = pkcs1pad_set_priv_key; inst->alg.max_size = pkcs1pad_get_max_size; diff --git a/crypto/rsa.c b/crypto/rsa.c index d9be9e86097e..6c7734083c98 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -98,14 +98,13 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c) goto err_free_mpi; /* (2iii) h = (m_1 - m_2) * qInv mod p */ - mpi_sub(m12_or_qh, m_or_m1_or_h, m2); - mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); + ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?: + mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); /* (2iv) m = m_2 + q * h */ - mpi_mul(m12_or_qh, key->q, m_or_m1_or_h); - mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); - - ret = 0; + ret = ret ?: + mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?: + mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); err_free_mpi: mpi_free(m12_or_qh); @@ -236,6 +235,7 @@ static int rsa_check_key_length(unsigned int len) static int rsa_check_exponent_fips(MPI e) { MPI e_max = NULL; + int err; /* check if odd */ if (!mpi_test_bit(e, 0)) { @@ -250,7 +250,12 @@ static int rsa_check_exponent_fips(MPI e) e_max = mpi_alloc(0); if (!e_max) return -ENOMEM; - mpi_set_bit(e_max, 256); + + err = mpi_set_bit(e_max, 256); + if (err) { + mpi_free(e_max); + return err; + } if (mpi_cmp(e, e_max) >= 0) { mpi_free(e_max); @@ -402,21 +407,30 @@ static int __init rsa_init(void) return err; err = crypto_register_template(&rsa_pkcs1pad_tmpl); - if (err) { - crypto_unregister_akcipher(&rsa); - return err; - } + if (err) + goto err_unregister_rsa; + + err = crypto_register_template(&rsassa_pkcs1_tmpl); + if (err) + goto err_unregister_rsa_pkcs1pad; return 0; + +err_unregister_rsa_pkcs1pad: + crypto_unregister_template(&rsa_pkcs1pad_tmpl); +err_unregister_rsa: + crypto_unregister_akcipher(&rsa); + return err; } static void __exit rsa_exit(void) { + crypto_unregister_template(&rsassa_pkcs1_tmpl); crypto_unregister_template(&rsa_pkcs1pad_tmpl); crypto_unregister_akcipher(&rsa); } -subsys_initcall(rsa_init); +module_init(rsa_init); module_exit(rsa_exit); MODULE_ALIAS_CRYPTO("rsa"); MODULE_LICENSE("GPL"); diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c new file mode 100644 index 000000000000..94fa5e9600e7 --- /dev/null +++ b/crypto/rsassa-pkcs1.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RSA Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2) + * + * https://www.rfc-editor.org/rfc/rfc8017#section-8.2 + * + * Copyright (c) 2015 - 2024 Intel Corporation + */ + +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <crypto/akcipher.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/sig.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/rsa.h> +#include <crypto/internal/sig.h> + +/* + * Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24) + * + * RSA keys are usually much larger than the hash of the message to be signed. + * The hash is therefore prepended by the Full Hash Prefix and a 0xff padding. + * The Full Hash Prefix is an ASN.1 SEQUENCE containing the hash algorithm OID. 
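 *
 * (Editorial illustration, not part of the patch: laid out end to end,
 * the EMSA-PKCS1-v1_5 encoded message is
 *
 *	EM = 0x00 || 0x01 || 0xff ... 0xff || 0x00 || prefix || hash
 *
 * with enough 0xff padding to stretch EM to the size of the RSA
 * modulus.  rsassa_pkcs1_sign() below builds the key_size - 1 bytes
 * that follow the leading 0x00, which is left implicit.)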
+ * + * https://www.rfc-editor.org/rfc/rfc9580#table-24 + */ + +static const u8 hash_prefix_none[] = { }; + +static const u8 hash_prefix_md5[] = { + 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, /* SEQUENCE (SEQUENCE (OID */ + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* <algorithm>, */ + 0x05, 0x00, 0x04, 0x10 /* NULL), OCTET STRING <hash>) */ +}; + +static const u8 hash_prefix_sha1[] = { + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, + 0x2b, 0x0e, 0x03, 0x02, 0x1a, + 0x05, 0x00, 0x04, 0x14 +}; + +static const u8 hash_prefix_rmd160[] = { + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, + 0x2b, 0x24, 0x03, 0x02, 0x01, + 0x05, 0x00, 0x04, 0x14 +}; + +static const u8 hash_prefix_sha224[] = { + 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, + 0x05, 0x00, 0x04, 0x1c +}; + +static const u8 hash_prefix_sha256[] = { + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, + 0x05, 0x00, 0x04, 0x20 +}; + +static const u8 hash_prefix_sha384[] = { + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, + 0x05, 0x00, 0x04, 0x30 +}; + +static const u8 hash_prefix_sha512[] = { + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, + 0x05, 0x00, 0x04, 0x40 +}; + +static const u8 hash_prefix_sha3_256[] = { + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08, + 0x05, 0x00, 0x04, 0x20 +}; + +static const u8 hash_prefix_sha3_384[] = { + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09, + 0x05, 0x00, 0x04, 0x30 +}; + +static const u8 hash_prefix_sha3_512[] = { + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0a, + 0x05, 0x00, 0x04, 0x40 +}; + +static const struct hash_prefix { + const char *name; + const u8 *data; + size_t size; +} hash_prefixes[] = { +#define _(X) { #X, hash_prefix_##X, sizeof(hash_prefix_##X) } + _(none), + _(md5), + _(sha1), + _(rmd160), + _(sha256), + _(sha384), + _(sha512), + _(sha224), +#undef _ +#define _(X) { "sha3-" #X, hash_prefix_sha3_##X, sizeof(hash_prefix_sha3_##X) } + _(256), + _(384), + _(512), +#undef _ + { NULL } +}; + +static const struct hash_prefix *rsassa_pkcs1_find_hash_prefix(const char *name) +{ + const struct hash_prefix *p; + + for (p = hash_prefixes; p->name; p++) + if (strcmp(name, p->name) == 0) + return p; + return NULL; +} + +static bool rsassa_pkcs1_invalid_hash_len(unsigned int len, + const struct hash_prefix *p) +{ + /* + * Legacy protocols such as TLS 1.1 or earlier and IKE version 1 + * do not prepend a Full Hash Prefix to the hash. In that case, + * the size of the Full Hash Prefix is zero. + */ + if (p->data == hash_prefix_none) + return false; + + /* + * The final byte of the Full Hash Prefix encodes the hash length. + * + * This needs to be revisited should hash algorithms with more than + * 1016 bits (127 bytes * 8) ever be added. The length would then + * be encoded into more than one byte by ASN.1. 
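 *
 * (Editorial example, not part of the patch: DER's short-form length
 * encoding fits any length from 0 to 127 into a single octet, so every
 * current digest size is covered.  The sha256 prefix above ends in
 * 0x04, 0x20 -- an OCTET STRING of 0x20 == 32 bytes, which is exactly
 * SHA256_DIGEST_SIZE and the value the check below compares len
 * against.)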
+ */ + static_assert(HASH_MAX_DIGESTSIZE <= 127); + + return len != p->data[p->size - 1]; +} + +struct rsassa_pkcs1_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct rsassa_pkcs1_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct hash_prefix *hash_prefix; +}; + +static int rsassa_pkcs1_sign(struct crypto_sig *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); + const struct hash_prefix *hash_prefix = ictx->hash_prefix; + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int pad_len; + unsigned int ps_end; + unsigned int len; + u8 *in_buf; + int err; + + if (!ctx->key_size) + return -EINVAL; + + if (dlen < ctx->key_size) + return -EOVERFLOW; + + if (rsassa_pkcs1_invalid_hash_len(slen, hash_prefix)) + return -EINVAL; + + if (slen + hash_prefix->size > ctx->key_size - 11) + return -EOVERFLOW; + + pad_len = ctx->key_size - slen - hash_prefix->size - 1; + + /* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */ + in_buf = dst; + memmove(in_buf + pad_len + hash_prefix->size, src, slen); + memcpy(in_buf + pad_len, hash_prefix->data, hash_prefix->size); + + ps_end = pad_len - 1; + in_buf[0] = 0x01; + memset(in_buf + 1, 0xff, ps_end - 1); + in_buf[ps_end] = 0x00; + + + /* RFC 8017 sec 8.2.1 step 2 - RSA signature */ + err = crypto_akcipher_sync_decrypt(ctx->child, in_buf, + ctx->key_size - 1, in_buf, + ctx->key_size); + if (err < 0) + return err; + + len = err; + pad_len = ctx->key_size - len; + + /* Four billion to one */ + if (unlikely(pad_len)) { + memmove(dst + pad_len, dst, len); + memset(dst, 0, pad_len); + } + + return ctx->key_size; +} + +static int rsassa_pkcs1_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); + const struct hash_prefix *hash_prefix = ictx->hash_prefix; + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child); + struct akcipher_request *child_req __free(kfree_sensitive) = NULL; + struct crypto_wait cwait; + struct scatterlist sg; + unsigned int dst_len; + unsigned int pos; + u8 *out_buf; + int err; + + /* RFC 8017 sec 8.2.2 step 1 - length checking */ + if (!ctx->key_size || + slen != ctx->key_size || + rsassa_pkcs1_invalid_hash_len(dlen, hash_prefix)) + return -EINVAL; + + /* RFC 8017 sec 8.2.2 step 2 - RSA verification */ + child_req = kmalloc(sizeof(*child_req) + child_reqsize + ctx->key_size, + GFP_KERNEL); + if (!child_req) + return -ENOMEM; + + out_buf = (u8 *)(child_req + 1) + child_reqsize; + memcpy(out_buf, src, slen); + + crypto_init_wait(&cwait); + sg_init_one(&sg, out_buf, slen); + akcipher_request_set_tfm(child_req, ctx->child); + akcipher_request_set_crypt(child_req, &sg, &sg, slen, slen); + akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &cwait); + + err = crypto_akcipher_encrypt(child_req); + err = crypto_wait_req(err, &cwait); + if (err) + return err; + + /* RFC 8017 sec 8.2.2 step 3 - EMSA-PKCS1-v1_5 encoding verification */ + dst_len = child_req->dst_len; + if (dst_len < ctx->key_size - 1) + return -EINVAL; + + if (dst_len == ctx->key_size) { + if (out_buf[0] != 0x00) + /* Encrypted value had no leading 0 byte */ + return -EINVAL; + + dst_len--; + out_buf++; + } + + if 
(out_buf[0] != 0x01) + return -EBADMSG; + + for (pos = 1; pos < dst_len; pos++) + if (out_buf[pos] != 0xff) + break; + + if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) + return -EBADMSG; + pos++; + + if (hash_prefix->size > dst_len - pos) + return -EBADMSG; + if (crypto_memneq(out_buf + pos, hash_prefix->data, hash_prefix->size)) + return -EBADMSG; + pos += hash_prefix->size; + + /* RFC 8017 sec 8.2.2 step 4 - comparison of digest with out_buf */ + if (dlen != dst_len - pos) + return -EKEYREJECTED; + if (memcmp(digest, out_buf + pos, dlen) != 0) + return -EKEYREJECTED; + + return 0; +} + +static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + return ctx->key_size * BITS_PER_BYTE; +} + +static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen); +} + +static int rsassa_pkcs1_set_priv_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen); +} + +static int rsassa_pkcs1_init_tfm(struct crypto_sig *tfm) +{ + struct sig_instance *inst = sig_alg_instance(tfm); + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + struct crypto_akcipher *child_tfm; + + child_tfm = crypto_spawn_akcipher(&ictx->spawn); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void rsassa_pkcs1_exit_tfm(struct crypto_sig *tfm) +{ + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); + + crypto_free_akcipher(ctx->child); +} + +static void rsassa_pkcs1_free(struct sig_instance *inst) +{ + struct rsassa_pkcs1_inst_ctx *ctx = sig_instance_ctx(inst); + struct crypto_akcipher_spawn *spawn = &ctx->spawn; + + crypto_drop_akcipher(spawn); + kfree(inst); +} + +static int rsassa_pkcs1_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct rsassa_pkcs1_inst_ctx *ctx; + struct akcipher_alg *rsa_alg; + struct sig_instance *inst; + const char *hash_name; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + ctx = sig_instance_ctx(inst); + + err = crypto_grab_akcipher(&ctx->spawn, sig_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + + rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); + + if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { + err = -EINVAL; + goto err_free_inst; + } + + hash_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(hash_name)) { + err = PTR_ERR(hash_name); + goto err_free_inst; + } + + ctx->hash_prefix = rsassa_pkcs1_find_hash_prefix(hash_name); + if (!ctx->hash_prefix) { + err = -EINVAL; + goto err_free_inst; + } + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "pkcs1(%s,%s)", rsa_alg->base.cra_name, + hash_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "pkcs1(%s,%s)", rsa_alg->base.cra_driver_name, + hash_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct 
rsassa_pkcs1_ctx); + + inst->alg.init = rsassa_pkcs1_init_tfm; + inst->alg.exit = rsassa_pkcs1_exit_tfm; + + inst->alg.sign = rsassa_pkcs1_sign; + inst->alg.verify = rsassa_pkcs1_verify; + inst->alg.key_size = rsassa_pkcs1_key_size; + inst->alg.set_pub_key = rsassa_pkcs1_set_pub_key; + inst->alg.set_priv_key = rsassa_pkcs1_set_priv_key; + + inst->free = rsassa_pkcs1_free; + + err = sig_register_instance(tmpl, inst); + if (err) { +err_free_inst: + rsassa_pkcs1_free(inst); + } + return err; +} + +struct crypto_template rsassa_pkcs1_tmpl = { + .name = "pkcs1", + .create = rsassa_pkcs1_create, + .module = THIS_MODULE, +}; + +MODULE_ALIAS_CRYPTO("pkcs1"); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 16f6ba896fb6..1d010e2a1b1a 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -10,64 +10,119 @@ */ #include <crypto/scatterwalk.h> +#include <linux/crypto.h> +#include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/scatterlist.h> +#include <linux/slab.h> -static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) +enum { + SKCIPHER_WALK_SLOW = 1 << 0, + SKCIPHER_WALK_COPY = 1 << 1, + SKCIPHER_WALK_DIFF = 1 << 2, + SKCIPHER_WALK_SLEEP = 1 << 3, +}; + +static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) +{ + return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; +} + +void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes) { - void *src = out ? buf : sgdata; - void *dst = out ? sgdata : buf; + struct scatterlist *sg = walk->sg; + + nbytes += walk->offset - sg->offset; - memcpy(dst, src, nbytes); + while (nbytes > sg->length) { + nbytes -= sg->length; + sg = sg_next(sg); + } + walk->sg = sg; + walk->offset = sg->offset + nbytes; } +EXPORT_SYMBOL_GPL(scatterwalk_skip); -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out) +inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, + unsigned int nbytes) { - for (;;) { - unsigned int len_this_page = scatterwalk_pagelen(walk); - u8 *vaddr; + do { + unsigned int to_copy; - if (len_this_page > nbytes) - len_this_page = nbytes; + to_copy = scatterwalk_next(walk, nbytes); + memcpy(buf, walk->addr, to_copy); + scatterwalk_done_src(walk, to_copy); + buf += to_copy; + nbytes -= to_copy; + } while (nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk); - if (out != 2) { - vaddr = scatterwalk_map(walk); - memcpy_dir(buf, vaddr, len_this_page, out); - scatterwalk_unmap(vaddr); - } +inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, + unsigned int nbytes) +{ + do { + unsigned int to_copy; - scatterwalk_advance(walk, len_this_page); + to_copy = scatterwalk_next(walk, nbytes); + memcpy(walk->addr, buf, to_copy); + scatterwalk_done_dst(walk, to_copy); + buf += to_copy; + nbytes -= to_copy; + } while (nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk); - if (nbytes == len_this_page) - break; +void memcpy_from_sglist(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes) +{ + struct scatter_walk walk; - buf += len_this_page; - nbytes -= len_this_page; + if (unlikely(nbytes == 0)) /* in case sg == NULL */ + return; - scatterwalk_pagedone(walk, out & 1, 1); - } + scatterwalk_start_at_pos(&walk, sg, start); + memcpy_from_scatterwalk(buf, &walk, nbytes); } -EXPORT_SYMBOL_GPL(scatterwalk_copychunks); +EXPORT_SYMBOL_GPL(memcpy_from_sglist); -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int 
start, unsigned int nbytes, int out) +void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, + const void *buf, unsigned int nbytes) { struct scatter_walk walk; - struct scatterlist tmp[2]; - if (!nbytes) + if (unlikely(nbytes == 0)) /* in case sg == NULL */ return; - sg = scatterwalk_ffwd(tmp, sg, start); + scatterwalk_start_at_pos(&walk, sg, start); + memcpy_to_scatterwalk(&walk, buf, nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_to_sglist); + +void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct skcipher_walk walk = {}; + + if (unlikely(nbytes == 0)) /* in case sg == NULL */ + return; + + walk.total = nbytes; + + scatterwalk_start(&walk.in, src); + scatterwalk_start(&walk.out, dst); - scatterwalk_start(&walk, sg); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); + skcipher_walk_first(&walk, true); + do { + if (walk.src.virt.addr != walk.dst.virt.addr) + memcpy(walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + skcipher_walk_done(&walk, 0); + } while (walk.nbytes); } -EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); +EXPORT_SYMBOL_GPL(memcpy_sglist); struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, @@ -91,3 +146,236 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], return dst; } EXPORT_SYMBOL_GPL(scatterwalk_ffwd); + +static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) +{ + unsigned alignmask = walk->alignmask; + unsigned n; + void *buffer; + + if (!walk->buffer) + walk->buffer = walk->page; + buffer = walk->buffer; + if (!buffer) { + /* Min size for a buffer of bsize bytes aligned to alignmask */ + n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + + buffer = kzalloc(n, skcipher_walk_gfp(walk)); + if (!buffer) + return skcipher_walk_done(walk, -ENOMEM); + walk->buffer = buffer; + } + + buffer = PTR_ALIGN(buffer, alignmask + 1); + memcpy_from_scatterwalk(buffer, &walk->in, bsize); + walk->out.__addr = buffer; + walk->in.__addr = walk->out.addr; + + walk->nbytes = bsize; + walk->flags |= SKCIPHER_WALK_SLOW; + + return 0; +} + +static int skcipher_next_copy(struct skcipher_walk *walk) +{ + void *tmp = walk->page; + + scatterwalk_map(&walk->in); + memcpy(tmp, walk->in.addr, walk->nbytes); + scatterwalk_unmap(&walk->in); + /* + * walk->in is advanced later when the number of bytes actually + * processed (which might be less than walk->nbytes) is known. 
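 *
 * (Editorial note, not part of the patch: the output side is deferred
 * the same way -- the SKCIPHER_WALK_COPY branch of skcipher_walk_done()
 * further down this file copies the processed bytes from walk->page
 * back out through walk->out.)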
+ */ + + walk->in.__addr = tmp; + walk->out.__addr = tmp; + return 0; +} + +static int skcipher_next_fast(struct skcipher_walk *walk) +{ + unsigned long diff; + + diff = offset_in_page(walk->in.offset) - + offset_in_page(walk->out.offset); + diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) - + (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT)); + + scatterwalk_map(&walk->out); + walk->in.__addr = walk->out.__addr; + + if (diff) { + walk->flags |= SKCIPHER_WALK_DIFF; + scatterwalk_map(&walk->in); + } + + return 0; +} + +static int skcipher_walk_next(struct skcipher_walk *walk) +{ + unsigned int bsize; + unsigned int n; + + n = walk->total; + bsize = min(walk->stride, max(n, walk->blocksize)); + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + + if (unlikely(n < bsize)) { + if (unlikely(walk->total < walk->blocksize)) + return skcipher_walk_done(walk, -EINVAL); + +slow_path: + return skcipher_next_slow(walk, bsize); + } + walk->nbytes = n; + + if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { + if (!walk->page) { + gfp_t gfp = skcipher_walk_gfp(walk); + + walk->page = (void *)__get_free_page(gfp); + if (!walk->page) + goto slow_path; + } + walk->flags |= SKCIPHER_WALK_COPY; + return skcipher_next_copy(walk); + } + + return skcipher_next_fast(walk); +} + +static int skcipher_copy_iv(struct skcipher_walk *walk) +{ + unsigned alignmask = walk->alignmask; + unsigned ivsize = walk->ivsize; + unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1); + unsigned size; + u8 *iv; + + /* Min size for a buffer of stride + ivsize, aligned to alignmask */ + size = aligned_stride + ivsize + + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + + walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); + if (!walk->buffer) + return -ENOMEM; + + iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride; + + walk->iv = memcpy(iv, walk->iv, walk->ivsize); + return 0; +} + +int skcipher_walk_first(struct skcipher_walk *walk, bool atomic) +{ + if (WARN_ON_ONCE(in_hardirq())) + return -EDEADLK; + + walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP; + + walk->buffer = NULL; + if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { + int err = skcipher_copy_iv(walk); + if (err) + return err; + } + + walk->page = NULL; + + return skcipher_walk_next(walk); +} +EXPORT_SYMBOL_GPL(skcipher_walk_first); + +/** + * skcipher_walk_done() - finish one step of a skcipher_walk + * @walk: the skcipher_walk + * @res: number of bytes *not* processed (>= 0) from walk->nbytes, + * or a -errno value to terminate the walk due to an error + * + * This function cleans up after one step of walking through the source and + * destination scatterlists, and advances to the next step if applicable. + * walk->nbytes is set to the number of bytes available in the next step, + * walk->total is set to the new total number of bytes remaining, and + * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there + * is no more data, or if an error occurred (i.e. -errno return), then + * walk->nbytes and walk->total are set to 0 and all resources owned by the + * skcipher_walk are freed. + * + * Return: 0 or a -errno value. If @res was a -errno value then it will be + * returned, but other errors may occur too. 
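 *
 * (Editorial sketch, not part of the patch: a typical caller, here
 * assuming the usual skcipher_walk_virt() entry point and a
 * hypothetical crypt_blocks() helper, loops as
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		crypt_blocks(walk.dst.virt.addr, walk.src.virt.addr,
 *			     walk.nbytes);
 *		err = skcipher_walk_done(&walk, 0);
 *	}
 *
 * passing 0 because every byte of the step was consumed.)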
+ */ +int skcipher_walk_done(struct skcipher_walk *walk, int res) +{ + unsigned int n = walk->nbytes; /* num bytes processed this step */ + unsigned int total = 0; /* new total remaining */ + + if (!n) + goto finish; + + if (likely(res >= 0)) { + n -= res; /* subtract num bytes *not* processed */ + total = walk->total - n; + } + + if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | + SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF)))) { + scatterwalk_advance(&walk->in, n); + } else if (walk->flags & SKCIPHER_WALK_DIFF) { + scatterwalk_done_src(&walk->in, n); + } else if (walk->flags & SKCIPHER_WALK_COPY) { + scatterwalk_advance(&walk->in, n); + scatterwalk_map(&walk->out); + memcpy(walk->out.addr, walk->page, n); + } else { /* SKCIPHER_WALK_SLOW */ + if (res > 0) { + /* + * Didn't process all bytes. Either the algorithm is + * broken, or this was the last step and it turned out + * the message wasn't evenly divisible into blocks but + * the algorithm requires it. + */ + res = -EINVAL; + total = 0; + } else + memcpy_to_scatterwalk(&walk->out, walk->out.addr, n); + goto dst_done; + } + + scatterwalk_done_dst(&walk->out, n); +dst_done: + + if (res > 0) + res = 0; + + walk->total = total; + walk->nbytes = 0; + + if (total) { + if (walk->flags & SKCIPHER_WALK_SLEEP) + cond_resched(); + walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF); + return skcipher_walk_next(walk); + } + +finish: + /* Short-circuit for the common/fast path. */ + if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) + goto out; + + if (walk->iv != walk->oiv) + memcpy(walk->oiv, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); + +out: + return res; +} +EXPORT_SYMBOL_GPL(skcipher_walk_done); diff --git a/crypto/scompress.c b/crypto/scompress.c index 60bbb7ea4060..c651e7f2197a 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -7,26 +7,30 @@ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> */ -#include <crypto/internal/acompress.h> #include <crypto/internal/scompress.h> #include <crypto/scatterwalk.h> +#include <linux/cpumask.h> #include <linux/cryptouser.h> #include <linux/err.h> +#include <linux/highmem.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/vmalloc.h> +#include <linux/workqueue.h> #include <net/netlink.h> #include "compress.h" struct scomp_scratch { spinlock_t lock; - void *src; - void *dst; + union { + void *src; + unsigned long saddr; + }; }; static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = { @@ -37,6 +41,10 @@ static const struct crypto_type crypto_scomp_type; static int scomp_scratch_users; static DEFINE_MUTEX(scomp_lock); +static cpumask_t scomp_scratch_want; +static void scomp_scratch_workfn(struct work_struct *work); +static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn); + static int __maybe_unused crypto_scomp_report( struct sk_buff *skb, struct crypto_alg *alg) { @@ -66,119 +74,206 @@ static void crypto_scomp_free_scratches(void) for_each_possible_cpu(i) { scratch = per_cpu_ptr(&scomp_scratch, i); - vfree(scratch->src); - vfree(scratch->dst); + free_page(scratch->saddr); scratch->src = NULL; - scratch->dst = NULL; } } -static int crypto_scomp_alloc_scratches(void) +static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu) { - struct scomp_scratch *scratch; - int i; + int node = 
cpu_to_node(cpu); + struct page *page; + + page = alloc_pages_node(node, GFP_KERNEL, 0); + if (!page) + return -ENOMEM; + spin_lock_bh(&scratch->lock); + scratch->src = page_address(page); + spin_unlock_bh(&scratch->lock); + return 0; +} - for_each_possible_cpu(i) { - void *mem; +static void scomp_scratch_workfn(struct work_struct *work) +{ + int cpu; - scratch = per_cpu_ptr(&scomp_scratch, i); + for_each_cpu(cpu, &scomp_scratch_want) { + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, cpu); + if (scratch->src) + continue; + if (scomp_alloc_scratch(scratch, cpu)) + break; - mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!mem) - goto error; - scratch->src = mem; - mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!mem) - goto error; - scratch->dst = mem; + cpumask_clear_cpu(cpu, &scomp_scratch_want); } - return 0; -error: - crypto_scomp_free_scratches(); - return -ENOMEM; +} + +static int crypto_scomp_alloc_scratches(void) +{ + unsigned int i = cpumask_first(cpu_possible_mask); + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, i); + return scomp_alloc_scratch(scratch, i); } static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) { + struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm)); int ret = 0; mutex_lock(&scomp_lock); - if (!scomp_scratch_users++) + ret = crypto_acomp_alloc_streams(&alg->streams); + if (ret) + goto unlock; + if (!scomp_scratch_users++) { ret = crypto_scomp_alloc_scratches(); + if (ret) + scomp_scratch_users--; + } +unlock: mutex_unlock(&scomp_lock); return ret; } +static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch) +{ + int cpu = raw_smp_processor_id(); + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, cpu); + spin_lock(&scratch->lock); + if (likely(scratch->src)) + return scratch; + spin_unlock(&scratch->lock); + + cpumask_set_cpu(cpu, &scomp_scratch_want); + schedule_work(&scomp_scratch_work); + + scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask)); + spin_lock(&scratch->lock); + return scratch; +} + +static inline void scomp_unlock_scratch(struct scomp_scratch *scratch) + __releases(scratch) +{ + spin_unlock(&scratch->lock); +} + static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - void **tfm_ctx = acomp_tfm_ctx(tfm); + struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm); + bool src_isvirt = acomp_request_src_isvirt(req); + bool dst_isvirt = acomp_request_dst_isvirt(req); struct crypto_scomp *scomp = *tfm_ctx; - void **ctx = acomp_request_ctx(req); + struct crypto_acomp_stream *stream; struct scomp_scratch *scratch; - void *src, *dst; - unsigned int dlen; + unsigned int slen = req->slen; + unsigned int dlen = req->dlen; + struct page *spage, *dpage; + unsigned int n; + const u8 *src; + size_t soff; + size_t doff; + u8 *dst; int ret; - if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) + if (!req->src || !slen) return -EINVAL; - if (req->dst && !req->dlen) + if (!req->dst || !dlen) return -EINVAL; - if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) - req->dlen = SCOMP_SCRATCH_SIZE; - - dlen = req->dlen; - - scratch = raw_cpu_ptr(&scomp_scratch); - spin_lock(&scratch->lock); + if (dst_isvirt) + dst = req->dvirt; + else { + if (dlen <= req->dst->length) { + dpage = sg_page(req->dst); + doff = req->dst->offset; + } else + return -ENOSYS; + + dpage = nth_page(dpage, doff / PAGE_SIZE); + doff = offset_in_page(doff); + + n = (dlen - 1) / 
PAGE_SIZE; + n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE; + if (PageHighMem(dpage + n) && + size_add(doff, dlen) > PAGE_SIZE) + return -ENOSYS; + dst = kmap_local_page(dpage) + doff; + } - if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) { - src = page_to_virt(sg_page(req->src)) + req->src->offset; - } else { - scatterwalk_map_and_copy(scratch->src, req->src, 0, - req->slen, 0); - src = scratch->src; + if (src_isvirt) + src = req->svirt; + else { + src = NULL; + do { + if (slen <= req->src->length) { + spage = sg_page(req->src); + soff = req->src->offset; + } else + break; + + spage = nth_page(spage, soff / PAGE_SIZE); + soff = offset_in_page(soff); + + n = (slen - 1) / PAGE_SIZE; + n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE; + if (PageHighMem(nth_page(spage, n)) && + size_add(soff, slen) > PAGE_SIZE) + break; + src = kmap_local_page(spage) + soff; + } while (0); } - if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst))) - dst = page_to_virt(sg_page(req->dst)) + req->dst->offset; - else - dst = scratch->dst; + stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams); - if (dir) - ret = crypto_scomp_compress(scomp, src, req->slen, - dst, &req->dlen, *ctx); + if (!src_isvirt && !src) { + const u8 *src; + + scratch = scomp_lock_scratch(); + src = scratch->src; + memcpy_from_sglist(scratch->src, req->src, 0, slen); + + if (dir) + ret = crypto_scomp_compress(scomp, src, slen, + dst, &dlen, stream->ctx); + else + ret = crypto_scomp_decompress(scomp, src, slen, + dst, &dlen, stream->ctx); + + scomp_unlock_scratch(scratch); + } else if (dir) + ret = crypto_scomp_compress(scomp, src, slen, + dst, &dlen, stream->ctx); else - ret = crypto_scomp_decompress(scomp, src, req->slen, - dst, &req->dlen, *ctx); - if (!ret) { - if (!req->dst) { - req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); - if (!req->dst) { - ret = -ENOMEM; - goto out; - } - } else if (req->dlen > dlen) { - ret = -ENOSPC; - goto out; - } - if (dst == scratch->dst) { - scatterwalk_map_and_copy(scratch->dst, req->dst, 0, - req->dlen, 1); - } else { - int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE); - int i; - struct page *dst_page = sg_page(req->dst); - - for (i = 0; i < nr_pages; i++) - flush_dcache_page(dst_page + i); + ret = crypto_scomp_decompress(scomp, src, slen, + dst, &dlen, stream->ctx); + + crypto_acomp_unlock_stream_bh(stream); + + req->dlen = dlen; + + if (!src_isvirt && src) + kunmap_local(src); + if (!dst_isvirt) { + kunmap_local(dst); + dlen += doff; + for (;;) { + flush_dcache_page(dpage); + if (dlen <= PAGE_SIZE) + break; + dlen -= PAGE_SIZE; + dpage = nth_page(dpage, 1); } } -out: - spin_unlock(&scratch->lock); + return ret; } @@ -198,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) crypto_free_scomp(*ctx); + flush_work(&scomp_scratch_work); mutex_lock(&scomp_lock); if (!--scomp_scratch_users) crypto_scomp_free_scratches(); @@ -225,67 +321,49 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) crt->compress = scomp_acomp_compress; crt->decompress = scomp_acomp_decompress; - crt->dst_free = sgl_free; - crt->reqsize = sizeof(void *); return 0; } -struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) +static void crypto_scomp_destroy(struct crypto_alg *alg) { - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void *ctx; - - ctx = 
crypto_scomp_alloc_ctx(scomp); - if (IS_ERR(ctx)) { - kfree(req); - return NULL; - } + struct scomp_alg *scomp = __crypto_scomp_alg(alg); - *req->__ctx = ctx; - - return req; -} - -void crypto_acomp_scomp_free_ctx(struct acomp_req *req) -{ - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void *ctx = *req->__ctx; - - if (ctx) - crypto_scomp_free_ctx(scomp, ctx); + crypto_acomp_free_streams(&scomp->streams); } static const struct crypto_type crypto_scomp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_scomp_init_tfm, + .destroy = crypto_scomp_destroy, #ifdef CONFIG_PROC_FS .show = crypto_scomp_show, #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_scomp_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_acomp_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SCOMPRESS, .tfmsize = offsetof(struct crypto_scomp, base), + .algsize = offsetof(struct scomp_alg, base), }; -int crypto_register_scomp(struct scomp_alg *alg) +static void scomp_prepare_alg(struct scomp_alg *alg) { struct crypto_alg *base = &alg->calg.base; comp_prepare_alg(&alg->calg); + base->cra_flags |= CRYPTO_ALG_REQ_VIRT; +} + +int crypto_register_scomp(struct scomp_alg *alg) +{ + struct crypto_alg *base = &alg->calg.base; + + scomp_prepare_alg(alg); + base->cra_type = &crypto_scomp_type; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; diff --git a/crypto/seed.c b/crypto/seed.c index d0506ade2a5f..815391f213de 100644 --- a/crypto/seed.c +++ b/crypto/seed.c @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #define SEED_NUM_KCONSTANTS 16 #define SEED_KEY_SIZE 16 @@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, { struct seed_ctx *ctx = crypto_tfm_ctx(tfm); u32 *keyout = ctx->keysched; - const __be32 *key = (const __be32 *)in_key; u32 i, t0, t1, x1, x2, x3, x4; - x1 = be32_to_cpu(key[0]); - x2 = be32_to_cpu(key[1]); - x3 = be32_to_cpu(key[2]); - x4 = be32_to_cpu(key[3]); + x1 = get_unaligned_be32(&in_key[0]); + x2 = get_unaligned_be32(&in_key[4]); + x3 = get_unaligned_be32(&in_key[8]); + x4 = get_unaligned_be32(&in_key[12]); for (i = 0; i < SEED_NUM_KCONSTANTS; i++) { t0 = x1 + x3 - KC[i]; @@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 x1, x2, x3, x4, t0, t1; const u32 *ks = ctx->keysched; - x1 = be32_to_cpu(src[0]); - x2 = be32_to_cpu(src[1]); - x3 = be32_to_cpu(src[2]); - x4 = be32_to_cpu(src[3]); + x1 = get_unaligned_be32(&in[0]); + x2 = get_unaligned_be32(&in[4]); + x3 = get_unaligned_be32(&in[8]); + x4 = get_unaligned_be32(&in[12]); OP(x1, x2, x3, x4, 0); OP(x3, x4, x1, x2, 2); @@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) OP(x1, x2, x3, x4, 28); OP(x3, x4, x1, x2, 30); - dst[0] = cpu_to_be32(x3); - dst[1] = cpu_to_be32(x4); - dst[2] = cpu_to_be32(x1); - dst[3] = cpu_to_be32(x2); + put_unaligned_be32(x3, &out[0]); + put_unaligned_be32(x4, &out[4]); + put_unaligned_be32(x1, &out[8]); + put_unaligned_be32(x2, &out[12]); } /* decrypt a block of text */ @@ 
-402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 x1, x2, x3, x4, t0, t1; const u32 *ks = ctx->keysched; - x1 = be32_to_cpu(src[0]); - x2 = be32_to_cpu(src[1]); - x3 = be32_to_cpu(src[2]); - x4 = be32_to_cpu(src[3]); + x1 = get_unaligned_be32(&in[0]); + x2 = get_unaligned_be32(&in[4]); + x3 = get_unaligned_be32(&in[8]); + x4 = get_unaligned_be32(&in[12]); OP(x1, x2, x3, x4, 30); OP(x3, x4, x1, x2, 28); @@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) OP(x1, x2, x3, x4, 2); OP(x3, x4, x1, x2, 0); - dst[0] = cpu_to_be32(x3); - dst[1] = cpu_to_be32(x4); - dst[2] = cpu_to_be32(x1); - dst[3] = cpu_to_be32(x2); + put_unaligned_be32(x3, &out[0]); + put_unaligned_be32(x4, &out[4]); + put_unaligned_be32(x1, &out[8]); + put_unaligned_be32(x2, &out[12]); } @@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = SEED_BLOCK_SIZE, .cra_ctxsize = sizeof(struct seed_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { @@ -466,7 +460,7 @@ static void __exit seed_fini(void) crypto_unregister_alg(&seed_alg); } -subsys_initcall(seed_init); +module_init(seed_init); module_exit(seed_fini); MODULE_DESCRIPTION("SEED Cipher Algorithm"); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 17e11d51ddc3..2bae99e33526 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -64,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req) data = req->base.data; info = req->iv; - if (req->src != req->dst) { - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - - skcipher_request_set_sync_tfm(nreq, ctx->sknull); - skcipher_request_set_callback(nreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, - req->assoclen + req->cryptlen, - NULL); - - err = crypto_skcipher_encrypt(nreq); - if (err) - return err; - } + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, + req->assoclen + req->cryptlen); if (unlikely(!IS_ALIGNED((unsigned long)info, crypto_aead_alignmask(geniv) + 1))) { @@ -179,7 +168,7 @@ static void __exit seqiv_module_exit(void) crypto_unregister_template(&seqiv_tmpl); } -subsys_initcall(seqiv_module_init); +module_init(seqiv_module_init); module_exit(seqiv_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index c6bca47931e2..b21e7606c652 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -11,7 +11,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/types.h> #include <crypto/serpent.h> @@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void) crypto_unregister_alg(&srp_alg); } -subsys_initcall(serpent_mod_init); +module_init(serpent_mod_init); module_exit(serpent_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 325b57fe28dc..024e8043bab0 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -12,13 +12,11 @@ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include 
<asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, @@ -39,38 +37,31 @@ static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, memzero_explicit(temp, sizeof(temp)); } -int crypto_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - return sha1_base_do_update(desc, data, len, sha1_generic_block_fn); + return sha1_base_do_update_blocks(desc, data, len, + sha1_generic_block_fn); } -EXPORT_SYMBOL(crypto_sha1_update); -static int sha1_final(struct shash_desc *desc, u8 *out) +static int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - sha1_base_do_finalize(desc, sha1_generic_block_fn); + sha1_base_do_finup(desc, data, len, sha1_generic_block_fn); return sha1_base_finish(desc, out); } -int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha1_base_do_update(desc, data, len, sha1_generic_block_fn); - return sha1_final(desc, out); -} -EXPORT_SYMBOL(crypto_sha1_finup); - static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = crypto_sha1_update, - .final = sha1_final, .finup = crypto_sha1_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -86,7 +77,7 @@ static void __exit sha1_generic_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(sha1_generic_mod_init); +module_init(sha1_generic_mod_init); module_exit(sha1_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha256.c b/crypto/sha256.c new file mode 100644 index 000000000000..4aeb213bab11 --- /dev/null +++ b/crypto/sha256.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API wrapper for the SHA-256 and SHA-224 library functions + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> + */ +#include <crypto/internal/hash.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +EXPORT_SYMBOL_GPL(sha224_zero_message_hash); + +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +EXPORT_SYMBOL_GPL(sha256_zero_message_hash); + +static int crypto_sha256_init(struct shash_desc *desc) +{ + sha256_block_init(shash_desc_ctx(desc)); + return 0; +} + +static inline int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len, bool force_generic) +{ + struct crypto_sha256_state *sctx = shash_desc_ctx(desc); + int remain = len % SHA256_BLOCK_SIZE; + + sctx->count += len - remain; + 
sha256_choose_blocks(sctx->state, data, len / SHA256_BLOCK_SIZE, + force_generic, !force_generic); + return remain; +} + +static int crypto_sha256_update_generic(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + return crypto_sha256_update(desc, data, len, true); +} + +static int crypto_sha256_update_lib(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + sha256_update(shash_desc_ctx(desc), data, len); + return 0; +} + +static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + return crypto_sha256_update(desc, data, len, false); +} + +static int crypto_sha256_final_lib(struct shash_desc *desc, u8 *out) +{ + sha256_final(shash_desc_ctx(desc), out); + return 0; +} + +static __always_inline int crypto_sha256_finup(struct shash_desc *desc, + const u8 *data, + unsigned int len, u8 *out, + bool force_generic) +{ + struct crypto_sha256_state *sctx = shash_desc_ctx(desc); + unsigned int remain = len; + u8 *buf; + + if (len >= SHA256_BLOCK_SIZE) + remain = crypto_sha256_update(desc, data, len, force_generic); + sctx->count += remain; + buf = memcpy(sctx + 1, data + len - remain, remain); + sha256_finup(sctx, buf, remain, out, + crypto_shash_digestsize(desc->tfm), force_generic, + !force_generic); + return 0; +} + +static int crypto_sha256_finup_generic(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return crypto_sha256_finup(desc, data, len, out, true); +} + +static int crypto_sha256_finup_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return crypto_sha256_finup(desc, data, len, out, false); +} + +static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + crypto_sha256_init(desc); + return crypto_sha256_finup_generic(desc, data, len, out); +} + +static int crypto_sha256_digest_lib(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + sha256(data, len, out); + return 0; +} + +static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + crypto_sha256_init(desc); + return crypto_sha256_finup_arch(desc, data, len, out); +} + +static int crypto_sha224_init(struct shash_desc *desc) +{ + sha224_block_init(shash_desc_ctx(desc)); + return 0; +} + +static int crypto_sha224_final_lib(struct shash_desc *desc, u8 *out) +{ + sha224_final(shash_desc_ctx(desc), out); + return 0; +} + +static int crypto_sha256_import_lib(struct shash_desc *desc, const void *in) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + const u8 *p = in; + + memcpy(sctx, p, sizeof(*sctx)); + p += sizeof(*sctx); + sctx->count += *p; + return 0; +} + +static int crypto_sha256_export_lib(struct shash_desc *desc, void *out) +{ + struct sha256_state *sctx0 = shash_desc_ctx(desc); + struct sha256_state sctx = *sctx0; + unsigned int partial; + u8 *p = out; + + partial = sctx.count % SHA256_BLOCK_SIZE; + sctx.count -= partial; + memcpy(p, &sctx, sizeof(sctx)); + p += sizeof(sctx); + *p = partial; + return 0; +} + +static struct shash_alg algs[] = { + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-generic", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update_generic, + .finup = crypto_sha256_finup_generic, + .digest = 
crypto_sha256_digest_generic, + .descsize = sizeof(struct crypto_sha256_state), + }, + { + .base.cra_name = "sha224", + .base.cra_driver_name = "sha224-generic", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha256_update_generic, + .finup = crypto_sha256_finup_generic, + .descsize = sizeof(struct crypto_sha256_state), + }, + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-lib", + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update_lib, + .final = crypto_sha256_final_lib, + .digest = crypto_sha256_digest_lib, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct crypto_sha256_state) + + SHA256_BLOCK_SIZE + 1, + .import = crypto_sha256_import_lib, + .export = crypto_sha256_export_lib, + }, + { + .base.cra_name = "sha224", + .base.cra_driver_name = "sha224-lib", + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha256_update_lib, + .final = crypto_sha224_final_lib, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct crypto_sha256_state) + + SHA256_BLOCK_SIZE + 1, + .import = crypto_sha256_import_lib, + .export = crypto_sha256_export_lib, + }, + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update_arch, + .finup = crypto_sha256_finup_arch, + .digest = crypto_sha256_digest_arch, + .descsize = sizeof(struct crypto_sha256_state), + }, + { + .base.cra_name = "sha224", + .base.cra_driver_name = "sha224-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha256_update_arch, + .finup = crypto_sha256_finup_arch, + .descsize = sizeof(struct crypto_sha256_state), + }, +}; + +static unsigned int num_algs; + +static int __init crypto_sha256_mod_init(void) +{ + /* register the arch flavours only if they differ from generic */ + num_algs = ARRAY_SIZE(algs); + BUILD_BUG_ON(ARRAY_SIZE(algs) <= 2); + if (!sha256_is_arch_optimized()) + num_algs -= 2; + return crypto_register_shashes(algs, num_algs); +} +module_init(crypto_sha256_mod_init); + +static void __exit crypto_sha256_mod_exit(void) +{ + crypto_unregister_shashes(algs, num_algs); +} +module_exit(crypto_sha256_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API wrapper for the SHA-256 and SHA-224 library functions"); + +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha256-generic"); +MODULE_ALIAS_CRYPTO("sha256-" __stringify(ARCH)); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha224-generic"); +MODULE_ALIAS_CRYPTO("sha224-" __stringify(ARCH)); diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c deleted file mode 100644 index bf147b01e313..000000000000
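/*
 * Illustrative sketch, not part of this patch: a minimal caller of the
 * flavours registered above. All of them resolve through the usual
 * "sha256"/"sha224" algorithm names, and cra_priority (300 vs 100)
 * selects the arch implementation whenever it has been registered.
 * crypto_shash_tfm_digest() is the existing one-shot shash helper.
 */
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>

static int sha256_oneshot_example(const u8 *data, unsigned int len,
				  u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	/* Picks the highest-priority registered "sha256". */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}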
--- a/crypto/sha256_generic.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> - */ -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/byteorder.h> -#include <asm/unaligned.h> - -const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, - 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, - 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, - 0x2f -}; -EXPORT_SYMBOL_GPL(sha224_zero_message_hash); - -const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; -EXPORT_SYMBOL_GPL(sha256_zero_message_hash); - -int crypto_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - sha256_update(shash_desc_ctx(desc), data, len); - return 0; -} -EXPORT_SYMBOL(crypto_sha256_update); - -static int crypto_sha256_final(struct shash_desc *desc, u8 *out) -{ - if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) - sha224_final(shash_desc_ctx(desc), out); - else - sha256_final(shash_desc_ctx(desc), out); - return 0; -} - -int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash) -{ - sha256_update(shash_desc_ctx(desc), data, len); - return crypto_sha256_final(desc, hash); -} -EXPORT_SYMBOL(crypto_sha256_finup); - -static struct shash_alg sha256_algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = crypto_sha256_update, - .final = crypto_sha256_final, - .finup = crypto_sha256_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-generic", - .cra_priority = 100, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = crypto_sha256_update, - .final = crypto_sha256_final, - .finup = crypto_sha256_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-generic", - .cra_priority = 100, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha256_generic_mod_init(void) -{ - return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); -} - -static void __exit sha256_generic_mod_fini(void) -{ - crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); -} - -subsys_initcall(sha256_generic_mod_init); -module_exit(sha256_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha224-generic"); -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha256-generic"); diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 3e4069935b53..41d1e506e6de 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -9,11 +9,11 @@ * Ard Biesheuvel 
<ard.biesheuvel@linaro.org> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/types.h> #include <crypto/sha3.h> -#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> /* * On some 32-bit architectures (h8300), GCC ends up using @@ -161,68 +161,51 @@ static void keccakf(u64 st[25]) int crypto_sha3_init(struct shash_desc *desc) { struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - - sctx->rsiz = 200 - 2 * digest_size; - sctx->rsizw = sctx->rsiz / 8; - sctx->partial = 0; memset(sctx->st, 0, sizeof(sctx->st)); return 0; } EXPORT_SYMBOL(crypto_sha3_init); -int crypto_sha3_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { + unsigned int rsiz = crypto_shash_blocksize(desc->tfm); struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int done; - const u8 *src; - - done = 0; - src = data; - - if ((sctx->partial + len) > (sctx->rsiz - 1)) { - if (sctx->partial) { - done = -sctx->partial; - memcpy(sctx->buf + sctx->partial, data, - done + sctx->rsiz); - src = sctx->buf; - } + unsigned int rsizw = rsiz / 8; - do { - unsigned int i; + do { + int i; - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= get_unaligned_le64(src + 8 * i); - keccakf(sctx->st); + for (i = 0; i < rsizw; i++) + sctx->st[i] ^= get_unaligned_le64(data + 8 * i); + keccakf(sctx->st); - done += sctx->rsiz; - src = data + done; - } while (done + (sctx->rsiz - 1) < len); - - sctx->partial = 0; - } - memcpy(sctx->buf + sctx->partial, src, len - done); - sctx->partial += (len - done); - - return 0; + data += rsiz; + len -= rsiz; + } while (len >= rsiz); + return len; } -EXPORT_SYMBOL(crypto_sha3_update); -int crypto_sha3_final(struct shash_desc *desc, u8 *out) +static int crypto_sha3_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int i, inlen = sctx->partial; unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + unsigned int rsiz = crypto_shash_blocksize(desc->tfm); + struct sha3_state *sctx = shash_desc_ctx(desc); + __le64 block[SHA3_224_BLOCK_SIZE / 8] = {}; __le64 *digest = (__le64 *)out; + unsigned int rsizw = rsiz / 8; + u8 *p; + int i; - sctx->buf[inlen++] = 0x06; - memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); - sctx->buf[sctx->rsiz - 1] |= 0x80; + p = memcpy(block, src, len); + p[len++] = 0x06; + p[rsiz - 1] |= 0x80; - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); + for (i = 0; i < rsizw; i++) + sctx->st[i] ^= le64_to_cpu(block[i]); + memzero_explicit(block, sizeof(block)); keccakf(sctx->st); @@ -232,49 +215,51 @@ int crypto_sha3_final(struct shash_desc *desc, u8 *out) if (digest_size & 4) put_unaligned_le32(sctx->st[i], (__le32 *)digest); - memset(sctx, 0, sizeof(*sctx)); return 0; } -EXPORT_SYMBOL(crypto_sha3_final); static struct shash_alg algs[] = { { .digestsize = SHA3_224_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-224", .base.cra_driver_name = "sha3-224-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, 
}, { .digestsize = SHA3_256_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-256", .base.cra_driver_name = "sha3-256-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_384_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-384", .base.cra_driver_name = "sha3-384-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_512_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-512", .base.cra_driver_name = "sha3-512-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; @@ -289,7 +274,7 @@ static void __exit sha3_generic_mod_fini(void) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } -subsys_initcall(sha3_generic_mod_init); +module_init(sha3_generic_mod_init); module_exit(sha3_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index be70e76d6d86..7368173f545e 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -6,17 +6,11 @@ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org> */ #include <crypto/internal/hash.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/crypto.h> -#include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <linux/percpu.h> -#include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = { 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, @@ -145,47 +139,42 @@ sha512_transform(u64 *state, const u8 *input) state[4] += e; state[5] += f; state[6] += g; state[7] += h; } -static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src, - int blocks) +void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src, + int blocks) { - while (blocks--) { + do { sha512_transform(sst->state, src); src += SHA512_BLOCK_SIZE; - } + } while (--blocks); } +EXPORT_SYMBOL_GPL(sha512_generic_block_fn); -int crypto_sha512_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - return sha512_base_do_update(desc, data, len, sha512_generic_block_fn); + return sha512_base_do_update_blocks(desc, data, len, + sha512_generic_block_fn); } -EXPORT_SYMBOL(crypto_sha512_update); -static int sha512_final(struct shash_desc *desc, u8 *hash) +static int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash) { - sha512_base_do_finalize(desc, sha512_generic_block_fn); + sha512_base_do_finup(desc, data, len, sha512_generic_block_fn); return sha512_base_finish(desc, hash); } -int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, - unsigned 
int len, u8 *hash) -{ - sha512_base_do_update(desc, data, len, sha512_generic_block_fn); - return sha512_final(desc, hash); -} -EXPORT_SYMBOL(crypto_sha512_finup); - static struct shash_alg sha512_algs[2] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = crypto_sha512_update, - .final = sha512_final, .finup = crypto_sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -193,13 +182,14 @@ static struct shash_alg sha512_algs[2] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = crypto_sha512_update, - .final = sha512_final, .finup = crypto_sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -215,7 +205,7 @@ static void __exit sha512_generic_mod_fini(void) crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs)); } -subsys_initcall(sha512_generic_mod_init); +module_init(sha512_generic_mod_init); module_exit(sha512_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/shash.c b/crypto/shash.c index c3f7f6a25280..4721f5f134f4 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -16,16 +16,22 @@ #include "hash.h" -static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) +static inline bool crypto_shash_block_only(struct crypto_shash *tfm) { - return hash_get_stat(&alg->halg); + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_BLOCK_ONLY; } -static inline int crypto_shash_errstat(struct shash_alg *alg, int err) +static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm) { - if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err) - atomic64_inc(&shash_get_stat(alg)->err_cnt); - return err; + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_FINAL_NONZERO; +} + +static inline bool crypto_shash_finup_max(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_FINUP_MAX; } int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, @@ -58,34 +64,27 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_shash_setkey); -int crypto_shash_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int __crypto_shash_init(struct shash_desc *desc) { - struct shash_alg *shash = crypto_shash_alg(desc->tfm); - int err; + struct crypto_shash *tfm = desc->tfm; - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_add(len, &shash_get_stat(shash)->hash_tlen); + if (crypto_shash_block_only(tfm)) { + u8 *buf = shash_desc_ctx(desc); - err = shash->update(desc, data, len); + buf += crypto_shash_descsize(tfm) - 1; + *buf = 0; + } - return crypto_shash_errstat(shash, err); + return crypto_shash_alg(tfm)->init(desc); } -EXPORT_SYMBOL_GPL(crypto_shash_update); -int crypto_shash_final(struct shash_desc *desc, u8 *out) +int crypto_shash_init(struct shash_desc *desc) { - struct shash_alg *shash = crypto_shash_alg(desc->tfm); - int err; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&shash_get_stat(shash)->hash_cnt); - - err = shash->final(desc, out); - - return 
crypto_shash_errstat(shash, err); + if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return __crypto_shash_init(desc); } -EXPORT_SYMBOL_GPL(crypto_shash_final); +EXPORT_SYMBOL_GPL(crypto_shash_init); static int shash_default_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) @@ -96,55 +95,101 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data, shash->final(desc, out); } -int crypto_shash_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int crypto_shash_op_and_zero( + int (*op)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out), + struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +{ + int err; + + err = op(desc, data, len, out); + memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm)); + return err; +} + +int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data, + unsigned int len, u8 *restrict out) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); + u8 *blenp = shash_desc_ctx(desc); + bool finup_max, nonzero; + unsigned int bs; int err; + u8 *buf; + + if (!crypto_shash_block_only(tfm)) { + if (out) + goto finup; + return crypto_shash_alg(tfm)->update(desc, data, len); + } + + finup_max = out && crypto_shash_finup_max(tfm); + + /* Retain extra block for final nonzero algorithms. */ + nonzero = crypto_shash_final_nonzero(tfm); + + /* + * The partial block buffer follows the algorithm desc context. + * The byte following that contains the length. + */ + blenp += crypto_shash_descsize(tfm) - 1; + bs = crypto_shash_blocksize(tfm); + buf = blenp - bs; - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_hash *istat = shash_get_stat(shash); + if (likely(!*blenp && finup_max)) + goto finup; - atomic64_inc(&istat->hash_cnt); - atomic64_add(len, &istat->hash_tlen); + while ((*blenp + len) >= bs + nonzero) { + unsigned int nbytes = len - nonzero; + const u8 *src = data; + + if (*blenp) { + memcpy(buf + *blenp, data, bs - *blenp); + nbytes = bs; + src = buf; + } + + err = crypto_shash_alg(tfm)->update(desc, src, nbytes); + if (err < 0) + return err; + + data += nbytes - err - *blenp; + len -= nbytes - err - *blenp; + *blenp = 0; } - err = shash->finup(desc, data, len, out); + if (*blenp || !out) { + memcpy(buf + *blenp, data, len); + *blenp += len; + if (!out) + return 0; + data = buf; + len = *blenp; + } - return crypto_shash_errstat(shash, err); +finup: + return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc, + data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_finup); static int shash_default_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - struct shash_alg *shash = crypto_shash_alg(desc->tfm); - - return shash->init(desc) ?: - shash->finup(desc, data, len, out); + return __crypto_shash_init(desc) ?: + crypto_shash_finup(desc, data, len, out); } int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - int err; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_hash *istat = shash_get_stat(shash); - - atomic64_inc(&istat->hash_cnt); - atomic64_add(len, &istat->hash_tlen); - } if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - err = -ENOKEY; - else - err = shash->digest(desc, data, len, out); + return -ENOKEY; - return crypto_shash_errstat(shash, err); + return 
crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc, + data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_digest); @@ -152,44 +197,105 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, unsigned int len, u8 *out) { SHASH_DESC_ON_STACK(desc, tfm); - int err; desc->tfm = tfm; + return crypto_shash_digest(desc, data, len, out); +} +EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); - err = crypto_shash_digest(desc, data, len, out); +static int __crypto_shash_export(struct shash_desc *desc, void *out, + int (*export)(struct shash_desc *desc, + void *out)) +{ + struct crypto_shash *tfm = desc->tfm; + u8 *buf = shash_desc_ctx(desc); + unsigned int plen, ss; + + plen = crypto_shash_blocksize(tfm) + 1; + ss = crypto_shash_statesize(tfm); + if (crypto_shash_block_only(tfm)) + ss -= plen; + if (!export) { + memcpy(out, buf, ss); + return 0; + } - shash_desc_zero(desc); + return export(desc, out); +} - return err; +int crypto_shash_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_shash_export(desc, out, + crypto_shash_alg(desc->tfm)->export_core); } -EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); +EXPORT_SYMBOL_GPL(crypto_shash_export_core); int crypto_shash_export(struct shash_desc *desc, void *out) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - if (shash->export) - return shash->export(desc, out); + if (crypto_shash_block_only(tfm)) { + unsigned int plen = crypto_shash_blocksize(tfm) + 1; + unsigned int descsize = crypto_shash_descsize(tfm); + unsigned int ss = crypto_shash_statesize(tfm); + u8 *buf = shash_desc_ctx(desc); - memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm)); - return 0; + memcpy(out + ss - plen, buf + descsize - plen, plen); + } + return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export); } EXPORT_SYMBOL_GPL(crypto_shash_export); -int crypto_shash_import(struct shash_desc *desc, const void *in) +static int __crypto_shash_import(struct shash_desc *desc, const void *in, + int (*import)(struct shash_desc *desc, + const void *in)) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned int descsize, plen, ss; + u8 *buf = shash_desc_ctx(desc); if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - if (shash->import) - return shash->import(desc, in); + ss = crypto_shash_statesize(tfm); + if (crypto_shash_block_only(tfm)) { + plen = crypto_shash_blocksize(tfm) + 1; + ss -= plen; + descsize = crypto_shash_descsize(tfm); + buf[descsize - 1] = 0; + } + if (!import) { + memcpy(buf, in, ss); + return 0; + } - memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); - return 0; + return import(desc, in); +} + +int crypto_shash_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_shash_import(desc, in, + crypto_shash_alg(desc->tfm)->import_core); +} +EXPORT_SYMBOL_GPL(crypto_shash_import_core); + +int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + int err; + + err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import); + if (crypto_shash_block_only(tfm)) { + unsigned int plen = crypto_shash_blocksize(tfm) + 1; + unsigned int descsize = crypto_shash_descsize(tfm); + unsigned int ss = crypto_shash_statesize(tfm); + u8 *buf = shash_desc_ctx(desc); + + memcpy(buf + descsize - plen, in + ss - plen, plen); + if (buf[descsize - 1] >= plen) + err = -EOVERFLOW; + } + return err; } EXPORT_SYMBOL_GPL(crypto_shash_import); @@ -205,9 +311,6 @@ 
static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); struct shash_alg *alg = crypto_shash_alg(hash); - int err; - - hash->descsize = alg->descsize; shash_set_needkey(hash, alg); @@ -217,18 +320,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) if (!alg->init_tfm) return 0; - err = alg->init_tfm(hash); - if (err) - return err; - - /* ->init_tfm() may have increased the descsize. */ - if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) { - if (alg->exit_tfm) - alg->exit_tfm(hash); - return -EINVAL; - } - - return 0; + return alg->init_tfm(hash); } static void crypto_shash_free_instance(struct crypto_instance *inst) @@ -265,12 +357,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "digestsize : %u\n", salg->digestsize); } -static int __maybe_unused crypto_shash_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - return crypto_hash_report_stat(skb, alg, "shash"); -} - const struct crypto_type crypto_shash_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_shash_init_tfm, @@ -281,13 +367,11 @@ const struct crypto_type crypto_shash_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_shash_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_shash_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SHASH, .tfmsize = offsetof(struct crypto_shash, base), + .algsize = offsetof(struct shash_alg, base), }; int crypto_grab_shash(struct crypto_shash_spawn *spawn, @@ -334,8 +418,6 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash) if (IS_ERR(nhash)) return nhash; - nhash->descsize = hash->descsize; - if (alg->clone_tfm) { err = alg->clone_tfm(nhash, hash); if (err) { @@ -344,13 +426,15 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash) } } + if (alg->exit_tfm) + crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm; + return nhash; } EXPORT_SYMBOL_GPL(crypto_clone_shash); int hash_prepare_alg(struct hash_alg_common *alg) { - struct crypto_istat_hash *istat = hash_get_stat(alg); struct crypto_alg *base = &alg->base; if (alg->digestsize > HASH_MAX_DIGESTSIZE) @@ -362,20 +446,24 @@ int hash_prepare_alg(struct hash_alg_common *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - memset(istat, 0, sizeof(*istat)); - return 0; } +static int shash_default_export_core(struct shash_desc *desc, void *out) +{ + return -ENOSYS; +} + +static int shash_default_import_core(struct shash_desc *desc, const void *in) +{ + return -ENOSYS; +} + static int shash_prepare_alg(struct shash_alg *alg) { struct crypto_alg *base = &alg->halg.base; int err; - if (alg->descsize > HASH_MAX_DESCSIZE) - return -EINVAL; - if ((alg->export && !alg->import) || (alg->import && !alg->export)) return -EINVAL; @@ -385,6 +473,7 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_type = &crypto_shash_type; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + base->cra_flags |= CRYPTO_ALG_REQ_VIRT; /* * Handle missing optional functions. 
For each one we can either @@ -401,11 +490,30 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->finup = shash_default_finup; if (!alg->digest) alg->digest = shash_default_digest; - if (!alg->export) + if (!alg->export && !alg->halg.statesize) alg->halg.statesize = alg->descsize; if (!alg->setkey) alg->setkey = shash_no_setkey; + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + alg->descsize += base->cra_blocksize + 1; + alg->statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = shash_default_export_core; + alg->import_core = shash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + + if (alg->descsize > HASH_MAX_DESCSIZE) + return -EINVAL; + if (alg->statesize > HASH_MAX_STATESIZE) + return -EINVAL; + + base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize; + return 0; } diff --git a/crypto/sig.c b/crypto/sig.c index 224c47019297..beba745b6405 100644 --- a/crypto/sig.c +++ b/crypto/sig.c @@ -5,30 +5,45 @@ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> */ -#include <crypto/akcipher.h> #include <crypto/internal/sig.h> #include <linux/cryptouser.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/string.h> #include <net/netlink.h> #include "internal.h" -#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e +static void crypto_sig_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_sig *sig = __crypto_sig_tfm(tfm); + struct sig_alg *alg = crypto_sig_alg(sig); -static const struct crypto_type crypto_sig_type; + alg->exit(sig); +} static int crypto_sig_init_tfm(struct crypto_tfm *tfm) { - if (tfm->__crt_alg->cra_type != &crypto_sig_type) - return crypto_init_akcipher_ops_sig(tfm); + struct crypto_sig *sig = __crypto_sig_tfm(tfm); + struct sig_alg *alg = crypto_sig_alg(sig); + + if (alg->exit) + sig->base.exit = crypto_sig_exit_tfm; + + if (alg->init) + return alg->init(sig); return 0; } +static void crypto_sig_free_instance(struct crypto_instance *inst) +{ + struct sig_instance *sig = sig_instance(inst); + + sig->free(sig); +} + static void __maybe_unused crypto_sig_show(struct seq_file *m, struct crypto_alg *alg) { @@ -38,39 +53,28 @@ static void __maybe_unused crypto_sig_show(struct seq_file *m, static int __maybe_unused crypto_sig_report(struct sk_buff *skb, struct crypto_alg *alg) { - struct crypto_report_akcipher rsig = {}; + struct crypto_report_sig rsig = {}; strscpy(rsig.type, "sig", sizeof(rsig.type)); - return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig); -} - -static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb, - struct crypto_alg *alg) -{ - struct crypto_stat_akcipher rsig = {}; - - strscpy(rsig.type, "sig", sizeof(rsig.type)); - - return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig); + return nla_put(skb, CRYPTOCFGA_REPORT_SIG, sizeof(rsig), &rsig); } static const struct crypto_type crypto_sig_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_sig_init_tfm, + .free = crypto_sig_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_sig_show, #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_sig_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_sig_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_SIG_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, .type = 
CRYPTO_ALG_TYPE_SIG, .tfmsize = offsetof(struct crypto_sig, base), + .algsize = offsetof(struct sig_alg, base), }; struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask) @@ -79,74 +83,100 @@ struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alloc_sig); -int crypto_sig_maxsize(struct crypto_sig *tfm) +static int sig_default_sign(struct crypto_sig *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) { - struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); + return -ENOSYS; +} - return crypto_akcipher_maxsize(*ctx); +static int sig_default_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *dst, unsigned int dlen) +{ + return -ENOSYS; } -EXPORT_SYMBOL_GPL(crypto_sig_maxsize); -int crypto_sig_sign(struct crypto_sig *tfm, - const void *src, unsigned int slen, - void *dst, unsigned int dlen) +static int sig_default_set_key(struct crypto_sig *tfm, + const void *key, unsigned int keylen) { - struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); - struct crypto_akcipher_sync_data data = { - .tfm = *ctx, - .src = src, - .dst = dst, - .slen = slen, - .dlen = dlen, - }; - - return crypto_akcipher_sync_prep(&data) ?: - crypto_akcipher_sync_post(&data, - crypto_akcipher_sign(data.req)); + return -ENOSYS; } -EXPORT_SYMBOL_GPL(crypto_sig_sign); -int crypto_sig_verify(struct crypto_sig *tfm, - const void *src, unsigned int slen, - const void *digest, unsigned int dlen) +static unsigned int sig_default_size(struct crypto_sig *tfm) { - struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); - struct crypto_akcipher_sync_data data = { - .tfm = *ctx, - .src = src, - .slen = slen, - .dlen = dlen, - }; + return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE); +} + +static int sig_prepare_alg(struct sig_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (!alg->sign) + alg->sign = sig_default_sign; + if (!alg->verify) + alg->verify = sig_default_verify; + if (!alg->set_priv_key) + alg->set_priv_key = sig_default_set_key; + if (!alg->set_pub_key) + return -EINVAL; + if (!alg->key_size) + return -EINVAL; + if (!alg->max_size) + alg->max_size = sig_default_size; + if (!alg->digest_size) + alg->digest_size = sig_default_size; + + base->cra_type = &crypto_sig_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SIG; + + return 0; +} + +int crypto_register_sig(struct sig_alg *alg) +{ + struct crypto_alg *base = &alg->base; int err; - err = crypto_akcipher_sync_prep(&data); + err = sig_prepare_alg(alg); if (err) return err; - memcpy(data.buf + slen, digest, dlen); + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_sig); - return crypto_akcipher_sync_post(&data, - crypto_akcipher_verify(data.req)); +void crypto_unregister_sig(struct sig_alg *alg) +{ + crypto_unregister_alg(&alg->base); } -EXPORT_SYMBOL_GPL(crypto_sig_verify); +EXPORT_SYMBOL_GPL(crypto_unregister_sig); -int crypto_sig_set_pubkey(struct crypto_sig *tfm, - const void *key, unsigned int keylen) +int sig_register_instance(struct crypto_template *tmpl, + struct sig_instance *inst) { - struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); + int err; + + if (WARN_ON(!inst->free)) + return -EINVAL; - return crypto_akcipher_set_pub_key(*ctx, key, keylen); + err = sig_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, sig_crypto_instance(inst)); } -EXPORT_SYMBOL_GPL(crypto_sig_set_pubkey); 
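/*
 * Illustrative sketch, not part of this patch: with sig promoted to a
 * first-class algorithm type, a backend fills in a struct sig_alg and
 * registers it directly instead of wrapping an akcipher. Every name
 * prefixed "demo_" is hypothetical. Per sig_prepare_alg() above,
 * .set_pub_key and .key_size are mandatory; .sign, .verify,
 * .set_priv_key, .max_size and .digest_size fall back to the defaults.
 */
static int demo_set_pub_key(struct crypto_sig *tfm,
			    const void *key, unsigned int keylen)
{
	return 0;	/* parse and cache the public key here */
}

static unsigned int demo_key_size(struct crypto_sig *tfm)
{
	/* In bits, matching the DIV_ROUND_UP_POW2(..., BITS_PER_BYTE)
	 * conversion in sig_default_size() above.
	 */
	return 2048;
}

static struct sig_alg demo_sig_alg = {
	.set_pub_key	= demo_set_pub_key,
	.key_size	= demo_key_size,
	.base = {
		.cra_name		= "demo-sig",
		.cra_driver_name	= "demo-sig-generic",
		.cra_priority		= 100,
		.cra_module		= THIS_MODULE,
	},
};

/* crypto_register_sig(&demo_sig_alg) on module init,
 * crypto_unregister_sig(&demo_sig_alg) on module exit.
 */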
+EXPORT_SYMBOL_GPL(sig_register_instance); -int crypto_sig_set_privkey(struct crypto_sig *tfm, - const void *key, unsigned int keylen) +int crypto_grab_sig(struct crypto_sig_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { - struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); - - return crypto_akcipher_set_priv_key(*ctx, key, keylen); + spawn->base.frontend = &crypto_sig_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); } -EXPORT_SYMBOL_GPL(crypto_sig_set_privkey); +EXPORT_SYMBOL_GPL(crypto_grab_sig); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Public Key Signature Algorithms"); diff --git a/crypto/simd.c b/crypto/simd.c index edaa479a1ec5..b07721d1f3f6 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) return 0; } -struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, + const char *algname, const char *drvname, const char *basename) { struct simd_skcipher_alg *salg; - struct crypto_skcipher *tfm; - struct skcipher_alg *ialg; struct skcipher_alg *alg; int err; - tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_skcipher_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_skcipher(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; + goto out; } EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); -struct simd_skcipher_alg *simd_skcipher_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_skcipher_create_compat(algname, drvname, basename); -} -EXPORT_SYMBOL_GPL(simd_skcipher_create); - void simd_skcipher_free(struct simd_skcipher_alg *salg) { crypto_unregister_skcipher(&salg->alg); @@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; @@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm) return 0; } -struct simd_aead_alg *simd_aead_create_compat(const char *algname, - const char *drvname, - const char *basename) +static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg, + const char *algname, + const char *drvname, + const char *basename) { struct simd_aead_alg *salg; - struct crypto_aead *tfm; - struct aead_alg *ialg; struct aead_alg *alg; int err; - tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_aead_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -442,36 +412,20 
@@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_aead(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; -} -EXPORT_SYMBOL_GPL(simd_aead_create_compat); - -struct simd_aead_alg *simd_aead_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_aead_create_compat(algname, drvname, basename); + goto out; } -EXPORT_SYMBOL_GPL(simd_aead_create); -void simd_aead_free(struct simd_aead_alg *salg) +static void simd_aead_free(struct simd_aead_alg *salg) { crypto_unregister_aead(&salg->alg); kfree(salg); } -EXPORT_SYMBOL_GPL(simd_aead_free); int simd_register_aeads_compat(struct aead_alg *algs, int count, struct simd_aead_alg **simd_algs) @@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_aead_create_compat(algname, drvname, basename); + simd = simd_aead_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; @@ -523,4 +477,5 @@ void simd_unregister_aeads(struct aead_alg *algs, int count, } EXPORT_SYMBOL_GPL(simd_unregister_aeads); +MODULE_DESCRIPTION("Shared crypto SIMD helpers"); MODULE_LICENSE("GPL"); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index bc70e159d27d..de5fc91bba26 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -17,462 +17,40 @@ #include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> -#include <linux/list.h> -#include <linux/mm.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> #include "skcipher.h" #define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e -enum { - SKCIPHER_WALK_PHYS = 1 << 0, - SKCIPHER_WALK_SLOW = 1 << 1, - SKCIPHER_WALK_COPY = 1 << 2, - SKCIPHER_WALK_DIFF = 1 << 3, - SKCIPHER_WALK_SLEEP = 1 << 4, -}; - -struct skcipher_walk_buffer { - struct list_head entry; - struct scatter_walk dst; - unsigned int len; - u8 *data; - u8 buffer[]; -}; - static const struct crypto_type crypto_skcipher_type; -static int skcipher_walk_next(struct skcipher_walk *walk); - -static inline void skcipher_map_src(struct skcipher_walk *walk) -{ - walk->src.virt.addr = scatterwalk_map(&walk->in); -} - -static inline void skcipher_map_dst(struct skcipher_walk *walk) -{ - walk->dst.virt.addr = scatterwalk_map(&walk->out); -} - -static inline void skcipher_unmap_src(struct skcipher_walk *walk) -{ - scatterwalk_unmap(walk->src.virt.addr); -} - -static inline void skcipher_unmap_dst(struct skcipher_walk *walk) -{ - scatterwalk_unmap(walk->dst.virt.addr); -} - -static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) -{ - return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; -} - -/* Get a spot of the specified length that does not straddle a page. - * The caller needs to ensure that there is enough space for this operation. 
- */ -static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) -{ - u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); - - return max(start, end_page); -} - static inline struct skcipher_alg *__crypto_skcipher_alg( struct crypto_alg *alg) { return container_of(alg, struct skcipher_alg, base); } -static inline struct crypto_istat_cipher *skcipher_get_stat( - struct skcipher_alg *alg) -{ - return skcipher_get_stat_common(&alg->co); -} - -static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err) -{ - struct crypto_istat_cipher *istat = skcipher_get_stat(alg); - - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&istat->err_cnt); - - return err; -} - -static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) -{ - u8 *addr; - - addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); - addr = skcipher_get_spot(addr, bsize); - scatterwalk_copychunks(addr, &walk->out, bsize, - (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); - return 0; -} - -int skcipher_walk_done(struct skcipher_walk *walk, int err) -{ - unsigned int n = walk->nbytes; - unsigned int nbytes = 0; - - if (!n) - goto finish; - - if (likely(err >= 0)) { - n -= err; - nbytes = walk->total - n; - } - - if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | - SKCIPHER_WALK_SLOW | - SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF)))) { -unmap_src: - skcipher_unmap_src(walk); - } else if (walk->flags & SKCIPHER_WALK_DIFF) { - skcipher_unmap_dst(walk); - goto unmap_src; - } else if (walk->flags & SKCIPHER_WALK_COPY) { - skcipher_map_dst(walk); - memcpy(walk->dst.virt.addr, walk->page, n); - skcipher_unmap_dst(walk); - } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { - if (err > 0) { - /* - * Didn't process all bytes. Either the algorithm is - * broken, or this was the last step and it turned out - * the message wasn't evenly divisible into blocks but - * the algorithm requires it. - */ - err = -EINVAL; - nbytes = 0; - } else - n = skcipher_done_slow(walk, n); - } - - if (err > 0) - err = 0; - - walk->total = nbytes; - walk->nbytes = 0; - - scatterwalk_advance(&walk->in, n); - scatterwalk_advance(&walk->out, n); - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); - - if (nbytes) { - crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? - CRYPTO_TFM_REQ_MAY_SLEEP : 0); - return skcipher_walk_next(walk); - } - -finish: - /* Short-circuit for the common/fast path. 
*/ - if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) - goto out; - - if (walk->flags & SKCIPHER_WALK_PHYS) - goto out; - - if (walk->iv != walk->oiv) - memcpy(walk->oiv, walk->iv, walk->ivsize); - if (walk->buffer != walk->page) - kfree(walk->buffer); - if (walk->page) - free_page((unsigned long)walk->page); - -out: - return err; -} -EXPORT_SYMBOL_GPL(skcipher_walk_done); - -void skcipher_walk_complete(struct skcipher_walk *walk, int err) -{ - struct skcipher_walk_buffer *p, *tmp; - - list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { - u8 *data; - - if (err) - goto done; - - data = p->data; - if (!data) { - data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); - data = skcipher_get_spot(data, walk->stride); - } - - scatterwalk_copychunks(data, &p->dst, p->len, 1); - - if (offset_in_page(p->data) + p->len + walk->stride > - PAGE_SIZE) - free_page((unsigned long)p->data); - -done: - list_del(&p->entry); - kfree(p); - } - - if (!err && walk->iv != walk->oiv) - memcpy(walk->oiv, walk->iv, walk->ivsize); - if (walk->buffer != walk->page) - kfree(walk->buffer); - if (walk->page) - free_page((unsigned long)walk->page); -} -EXPORT_SYMBOL_GPL(skcipher_walk_complete); - -static void skcipher_queue_write(struct skcipher_walk *walk, - struct skcipher_walk_buffer *p) -{ - p->dst = walk->out; - list_add_tail(&p->entry, &walk->buffers); -} - -static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) -{ - bool phys = walk->flags & SKCIPHER_WALK_PHYS; - unsigned alignmask = walk->alignmask; - struct skcipher_walk_buffer *p; - unsigned a; - unsigned n; - u8 *buffer; - void *v; - - if (!phys) { - if (!walk->buffer) - walk->buffer = walk->page; - buffer = walk->buffer; - if (buffer) - goto ok; - } - - /* Start with the minimum alignment of kmalloc. */ - a = crypto_tfm_ctx_alignment() - 1; - n = bsize; - - if (phys) { - /* Calculate the minimum alignment of p->buffer. */ - a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; - n += sizeof(*p); - } - - /* Minimum size to align p->buffer by alignmask. */ - n += alignmask & ~a; - - /* Minimum size to ensure p->buffer does not straddle a page. 
*/ - n += (bsize - 1) & ~(alignmask | a); - - v = kzalloc(n, skcipher_walk_gfp(walk)); - if (!v) - return skcipher_walk_done(walk, -ENOMEM); - - if (phys) { - p = v; - p->len = bsize; - skcipher_queue_write(walk, p); - buffer = p->buffer; - } else { - walk->buffer = v; - buffer = v; - } - -ok: - walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); - walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); - walk->src.virt.addr = walk->dst.virt.addr; - - scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); - - walk->nbytes = bsize; - walk->flags |= SKCIPHER_WALK_SLOW; - - return 0; -} - -static int skcipher_next_copy(struct skcipher_walk *walk) -{ - struct skcipher_walk_buffer *p; - u8 *tmp = walk->page; - - skcipher_map_src(walk); - memcpy(tmp, walk->src.virt.addr, walk->nbytes); - skcipher_unmap_src(walk); - - walk->src.virt.addr = tmp; - walk->dst.virt.addr = tmp; - - if (!(walk->flags & SKCIPHER_WALK_PHYS)) - return 0; - - p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); - if (!p) - return -ENOMEM; - - p->data = walk->page; - p->len = walk->nbytes; - skcipher_queue_write(walk, p); - - if (offset_in_page(walk->page) + walk->nbytes + walk->stride > - PAGE_SIZE) - walk->page = NULL; - else - walk->page += walk->nbytes; - - return 0; -} - -static int skcipher_next_fast(struct skcipher_walk *walk) -{ - unsigned long diff; - - walk->src.phys.page = scatterwalk_page(&walk->in); - walk->src.phys.offset = offset_in_page(walk->in.offset); - walk->dst.phys.page = scatterwalk_page(&walk->out); - walk->dst.phys.offset = offset_in_page(walk->out.offset); - - if (walk->flags & SKCIPHER_WALK_PHYS) - return 0; - - diff = walk->src.phys.offset - walk->dst.phys.offset; - diff |= walk->src.virt.page - walk->dst.virt.page; - - skcipher_map_src(walk); - walk->dst.virt.addr = walk->src.virt.addr; - - if (diff) { - walk->flags |= SKCIPHER_WALK_DIFF; - skcipher_map_dst(walk); - } - - return 0; -} - -static int skcipher_walk_next(struct skcipher_walk *walk) +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic) { - unsigned int bsize; - unsigned int n; - int err; - - walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF); - - n = walk->total; - bsize = min(walk->stride, max(n, walk->blocksize)); - n = scatterwalk_clamp(&walk->in, n); - n = scatterwalk_clamp(&walk->out, n); - - if (unlikely(n < bsize)) { - if (unlikely(walk->total < walk->blocksize)) - return skcipher_walk_done(walk, -EINVAL); - -slow_path: - err = skcipher_next_slow(walk, bsize); - goto set_phys_lowmem; - } - - if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { - if (!walk->page) { - gfp_t gfp = skcipher_walk_gfp(walk); - - walk->page = (void *)__get_free_page(gfp); - if (!walk->page) - goto slow_path; - } - - walk->nbytes = min_t(unsigned, n, - PAGE_SIZE - offset_in_page(walk->page)); - walk->flags |= SKCIPHER_WALK_COPY; - err = skcipher_next_copy(walk); - goto set_phys_lowmem; - } - - walk->nbytes = n; - - return skcipher_next_fast(walk); - -set_phys_lowmem: - if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { - walk->src.phys.page = virt_to_page(walk->src.virt.addr); - walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); - walk->src.phys.offset &= PAGE_SIZE - 1; - walk->dst.phys.offset &= PAGE_SIZE - 1; - } - return err; -} - -static int skcipher_copy_iv(struct skcipher_walk *walk) -{ - unsigned a = crypto_tfm_ctx_alignment() - 1; - unsigned alignmask = walk->alignmask; - unsigned ivsize = 
walk->ivsize; - unsigned bs = walk->stride; - unsigned aligned_bs; - unsigned size; - u8 *iv; - - aligned_bs = ALIGN(bs, alignmask + 1); - - /* Minimum size to align buffer by alignmask. */ - size = alignmask & ~a; - - if (walk->flags & SKCIPHER_WALK_PHYS) - size += ivsize; - else { - size += aligned_bs + ivsize; - - /* Minimum size to ensure buffer does not straddle a page. */ - size += (bs - 1) & ~(alignmask | a); - } - - walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); - if (!walk->buffer) - return -ENOMEM; - - iv = PTR_ALIGN(walk->buffer, alignmask + 1); - iv = skcipher_get_spot(iv, bs) + aligned_bs; - - walk->iv = memcpy(iv, walk->iv, walk->ivsize); - return 0; -} - -static int skcipher_walk_first(struct skcipher_walk *walk) -{ - if (WARN_ON_ONCE(in_hardirq())) - return -EDEADLK; - - walk->buffer = NULL; - if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { - int err = skcipher_copy_iv(walk); - if (err) - return err; - } - - walk->page = NULL; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg; - return skcipher_walk_next(walk); -} + might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); -static int skcipher_walk_skcipher(struct skcipher_walk *walk, - struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + alg = crypto_skcipher_alg(tfm); walk->total = req->cryptlen; walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)) + atomic = true; if (unlikely(!walk->total)) return 0; @@ -480,10 +58,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->flags &= ~SKCIPHER_WALK_SLEEP; - walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? - SKCIPHER_WALK_SLEEP : 0; - walk->blocksize = crypto_skcipher_blocksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); @@ -493,81 +67,39 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, else walk->stride = alg->walksize; - return skcipher_walk_first(walk); -} - -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, bool atomic) -{ - int err; - - might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - - walk->flags &= ~SKCIPHER_WALK_PHYS; - - err = skcipher_walk_skcipher(walk, req); - - walk->flags &= atomic ? 
~SKCIPHER_WALK_SLEEP : ~0; - - return err; + return skcipher_walk_first(walk, atomic); } EXPORT_SYMBOL_GPL(skcipher_walk_virt); -int skcipher_walk_async(struct skcipher_walk *walk, - struct skcipher_request *req) -{ - walk->flags |= SKCIPHER_WALK_PHYS; - - INIT_LIST_HEAD(&walk->buffers); - - return skcipher_walk_skcipher(walk, req); -} -EXPORT_SYMBOL_GPL(skcipher_walk_async); - -static int skcipher_walk_aead_common(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); - int err; walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)) + atomic = true; if (unlikely(!walk->total)) return 0; - walk->flags &= ~SKCIPHER_WALK_PHYS; - - scatterwalk_start(&walk->in, req->src); - scatterwalk_start(&walk->out, req->dst); - - scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); - scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); - - scatterwalk_done(&walk->in, 0, walk->total); - scatterwalk_done(&walk->out, 0, walk->total); - - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) - walk->flags |= SKCIPHER_WALK_SLEEP; - else - walk->flags &= ~SKCIPHER_WALK_SLEEP; + scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen); + scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen); walk->blocksize = crypto_aead_blocksize(tfm); walk->stride = crypto_aead_chunksize(tfm); walk->ivsize = crypto_aead_ivsize(tfm); walk->alignmask = crypto_aead_alignmask(tfm); - err = skcipher_walk_first(walk); - - if (atomic) - walk->flags &= ~SKCIPHER_WALK_SLEEP; - - return err; + return skcipher_walk_first(walk, atomic); } -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { walk->total = req->cryptlen; @@ -575,8 +107,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, } EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); @@ -654,23 +187,12 @@ int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - int ret; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_cipher *istat = skcipher_get_stat(alg); - - atomic64_inc(&istat->encrypt_cnt); - atomic64_add(req->cryptlen, &istat->encrypt_tlen); - } if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else if (alg->co.base.cra_type != &crypto_skcipher_type) - ret = crypto_lskcipher_encrypt_sg(req); - else - ret = alg->encrypt(req); - - return crypto_skcipher_errstat(alg, ret); + return -ENOKEY; + if (alg->co.base.cra_type != &crypto_skcipher_type) + return crypto_lskcipher_encrypt_sg(req); + return alg->encrypt(req); } EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt); @@ -678,23 +200,12 @@ int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - int ret; - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - 
struct crypto_istat_cipher *istat = skcipher_get_stat(alg); - - atomic64_inc(&istat->decrypt_cnt); - atomic64_add(req->cryptlen, &istat->decrypt_tlen); - } if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else if (alg->co.base.cra_type != &crypto_skcipher_type) - ret = crypto_lskcipher_decrypt_sg(req); - else - ret = alg->decrypt(req); - - return crypto_skcipher_errstat(alg, ret); + return -ENOKEY; + if (alg->co.base.cra_type != &crypto_skcipher_type) + return crypto_lskcipher_decrypt_sg(req); + return alg->decrypt(req); } EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt); @@ -816,7 +327,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "type : skcipher\n"); seq_printf(m, "async : %s\n", - alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); @@ -846,28 +357,6 @@ static int __maybe_unused crypto_skcipher_report( sizeof(rblkcipher), &rblkcipher); } -static int __maybe_unused crypto_skcipher_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg); - struct crypto_istat_cipher *istat; - struct crypto_stat_cipher rcipher; - - istat = skcipher_get_stat(skcipher); - - memset(&rcipher, 0, sizeof(rcipher)); - - strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); - - rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); - rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); - rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); - rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); - rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); - - return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); -} - static const struct crypto_type crypto_skcipher_type = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, @@ -878,13 +367,11 @@ static const struct crypto_type crypto_skcipher_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_skcipher_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_skcipher_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), + .algsize = offsetof(struct skcipher_alg, base), }; int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, @@ -910,6 +397,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher( /* Only sync algorithms allowed. 
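/*
 * Context for the mask/type fix-up just below: callers rely on the
 * returned tfm being synchronous so the request can live on the
 * stack.  Typical usage, sketched with setup and error handling
 * elided (src, dst, len and iv are assumed to be set up by the
 * caller; this is illustrative, not part of the patch):
 */
	struct crypto_sync_skcipher *tfm;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (!IS_ERR(tfm)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_skcipher_encrypt(req);
		crypto_free_sync_skcipher(tfm);
	}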
*/ mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE; + type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE); tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask); @@ -935,7 +423,6 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher); int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) { - struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg); struct crypto_alg *base = &alg->base; if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || @@ -948,9 +435,6 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - memset(istat, 0, sizeof(*istat)); - return 0; } @@ -1155,4 +639,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/skcipher.h b/crypto/skcipher.h index 16c9484360da..703651367dd8 100644 --- a/crypto/skcipher.h +++ b/crypto/skcipher.h @@ -10,16 +10,6 @@ #include <crypto/internal/skcipher.h> #include "internal.h" -static inline struct crypto_istat_cipher *skcipher_get_stat_common( - struct skcipher_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm); diff --git a/crypto/sm2.c b/crypto/sm2.c deleted file mode 100644 index 5ab120d74c59..000000000000 --- a/crypto/sm2.c +++ /dev/null @@ -1,498 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SM2 asymmetric public-key algorithm - * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and - * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 - * - * Copyright (c) 2020, Alibaba Group. - * Authors: Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <linux/mpi.h> -#include <crypto/internal/akcipher.h> -#include <crypto/akcipher.h> -#include <crypto/hash.h> -#include <crypto/rng.h> -#include <crypto/sm2.h> -#include "sm2signature.asn1.h" - -/* The default user id as specified in GM/T 0009-2012 */ -#define SM2_DEFAULT_USERID "1234567812345678" -#define SM2_DEFAULT_USERID_LEN 16 - -#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8) - -struct ecc_domain_parms { - const char *desc; /* Description of the curve. */ - unsigned int nbits; /* Number of bits. */ - unsigned int fips:1; /* True if this is a FIPS140-2 approved curve */ - - /* The model describing this curve. This is mainly used to select - * the group equation. - */ - enum gcry_mpi_ec_models model; - - /* The actual ECC dialect used. This is used for curve specific - * optimizations and to select encodings etc. - */ - enum ecc_dialects dialect; - - const char *p; /* The prime defining the field. */ - const char *a, *b; /* The coefficients. For Twisted Edwards - * Curves b is used for d. For Montgomery - * Curves (a,b) has ((A-2)/4,B^-1). - */ - const char *n; /* The order of the base point. */ - const char *g_x, *g_y; /* Base point. */ - unsigned int h; /* Cofactor. 
*/ -}; - -static const struct ecc_domain_parms sm2_ecp = { - .desc = "sm2p256v1", - .nbits = 256, - .fips = 0, - .model = MPI_EC_WEIERSTRASS, - .dialect = ECC_DIALECT_STANDARD, - .p = "0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff", - .a = "0xfffffffeffffffffffffffffffffffffffffffff00000000fffffffffffffffc", - .b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93", - .n = "0xfffffffeffffffffffffffffffffffff7203df6b21c6052b53bbf40939d54123", - .g_x = "0x32c4ae2c1f1981195f9904466a39c9948fe30bbff2660be1715a4589334c74c7", - .g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0", - .h = 1 -}; - -static int __sm2_set_pub_key(struct mpi_ec_ctx *ec, - const void *key, unsigned int keylen); - -static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec) -{ - const struct ecc_domain_parms *ecp = &sm2_ecp; - MPI p, a, b; - MPI x, y; - int rc = -EINVAL; - - p = mpi_scanval(ecp->p); - a = mpi_scanval(ecp->a); - b = mpi_scanval(ecp->b); - if (!p || !a || !b) - goto free_p; - - x = mpi_scanval(ecp->g_x); - y = mpi_scanval(ecp->g_y); - if (!x || !y) - goto free; - - rc = -ENOMEM; - - ec->Q = mpi_point_new(0); - if (!ec->Q) - goto free; - - /* mpi_ec_setup_elliptic_curve */ - ec->G = mpi_point_new(0); - if (!ec->G) { - mpi_point_release(ec->Q); - goto free; - } - - mpi_set(ec->G->x, x); - mpi_set(ec->G->y, y); - mpi_set_ui(ec->G->z, 1); - - rc = -EINVAL; - ec->n = mpi_scanval(ecp->n); - if (!ec->n) { - mpi_point_release(ec->Q); - mpi_point_release(ec->G); - goto free; - } - - ec->h = ecp->h; - ec->name = ecp->desc; - mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b); - - rc = 0; - -free: - mpi_free(x); - mpi_free(y); -free_p: - mpi_free(p); - mpi_free(a); - mpi_free(b); - - return rc; -} - -static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec) -{ - mpi_ec_deinit(ec); - - memset(ec, 0, sizeof(*ec)); -} - -/* RESULT must have been initialized and is set on success to the - * point given by VALUE. 
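/*
 * For reference, the wire format sm2_ecc_os2ec() below expects is the
 * SEC1 uncompressed point encoding; for this 256-bit curve that is
 * 65 bytes:
 *
 *   buf[0]       = 0x04   (uncompressed marker; 0x02/0x03 compressed
 *                          forms are rejected)
 *   buf[1..32]   = X coordinate, big-endian
 *   buf[33..64]  = Y coordinate, big-endian
 */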
- */ -static int sm2_ecc_os2ec(MPI_POINT result, MPI value) -{ - int rc; - size_t n; - unsigned char *buf; - MPI x, y; - - n = MPI_NBYTES(value); - buf = kmalloc(n, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value); - if (rc) - goto err_freebuf; - - rc = -EINVAL; - if (n < 1 || ((n - 1) % 2)) - goto err_freebuf; - /* No support for point compression */ - if (*buf != 0x4) - goto err_freebuf; - - rc = -ENOMEM; - n = (n - 1) / 2; - x = mpi_read_raw_data(buf + 1, n); - if (!x) - goto err_freebuf; - y = mpi_read_raw_data(buf + 1 + n, n); - if (!y) - goto err_freex; - - mpi_normalize(x); - mpi_normalize(y); - mpi_set(result->x, x); - mpi_set(result->y, y); - mpi_set_ui(result->z, 1); - - rc = 0; - - mpi_free(y); -err_freex: - mpi_free(x); -err_freebuf: - kfree(buf); - return rc; -} - -struct sm2_signature_ctx { - MPI sig_r; - MPI sig_s; -}; - -int sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct sm2_signature_ctx *sig = context; - - if (!value || !vlen) - return -EINVAL; - - sig->sig_r = mpi_read_raw_data(value, vlen); - if (!sig->sig_r) - return -ENOMEM; - - return 0; -} - -int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) -{ - struct sm2_signature_ctx *sig = context; - - if (!value || !vlen) - return -EINVAL; - - sig->sig_s = mpi_read_raw_data(value, vlen); - if (!sig->sig_s) - return -ENOMEM; - - return 0; -} - -static int sm2_z_digest_update(struct shash_desc *desc, - MPI m, unsigned int pbytes) -{ - static const unsigned char zero[32]; - unsigned char *in; - unsigned int inlen; - int err; - - in = mpi_get_buffer(m, &inlen, NULL); - if (!in) - return -EINVAL; - - if (inlen < pbytes) { - /* padding with zero */ - err = crypto_shash_update(desc, zero, pbytes - inlen) ?: - crypto_shash_update(desc, in, inlen); - } else if (inlen > pbytes) { - /* skip the starting zero */ - err = crypto_shash_update(desc, in + inlen - pbytes, pbytes); - } else { - err = crypto_shash_update(desc, in, inlen); - } - - kfree(in); - return err; -} - -static int sm2_z_digest_update_point(struct shash_desc *desc, - MPI_POINT point, struct mpi_ec_ctx *ec, - unsigned int pbytes) -{ - MPI x, y; - int ret = -EINVAL; - - x = mpi_new(0); - y = mpi_new(0); - - ret = mpi_ec_get_affine(x, y, point, ec) ? 
-EINVAL : - sm2_z_digest_update(desc, x, pbytes) ?: - sm2_z_digest_update(desc, y, pbytes); - - mpi_free(x); - mpi_free(y); - return ret; -} - -int sm2_compute_z_digest(struct shash_desc *desc, - const void *key, unsigned int keylen, void *dgst) -{ - struct mpi_ec_ctx *ec; - unsigned int bits_len; - unsigned int pbytes; - u8 entl[2]; - int err; - - ec = kmalloc(sizeof(*ec), GFP_KERNEL); - if (!ec) - return -ENOMEM; - - err = sm2_ec_ctx_init(ec); - if (err) - goto out_free_ec; - - err = __sm2_set_pub_key(ec, key, keylen); - if (err) - goto out_deinit_ec; - - bits_len = SM2_DEFAULT_USERID_LEN * 8; - entl[0] = bits_len >> 8; - entl[1] = bits_len & 0xff; - - pbytes = MPI_NBYTES(ec->p); - - /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */ - err = crypto_shash_init(desc); - if (err) - goto out_deinit_ec; - - err = crypto_shash_update(desc, entl, 2); - if (err) - goto out_deinit_ec; - - err = crypto_shash_update(desc, SM2_DEFAULT_USERID, - SM2_DEFAULT_USERID_LEN); - if (err) - goto out_deinit_ec; - - err = sm2_z_digest_update(desc, ec->a, pbytes) ?: - sm2_z_digest_update(desc, ec->b, pbytes) ?: - sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ?: - sm2_z_digest_update_point(desc, ec->Q, ec, pbytes); - if (err) - goto out_deinit_ec; - - err = crypto_shash_final(desc, dgst); - -out_deinit_ec: - sm2_ec_ctx_deinit(ec); -out_free_ec: - kfree(ec); - return err; -} -EXPORT_SYMBOL_GPL(sm2_compute_z_digest); - -static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s) -{ - int rc = -EINVAL; - struct gcry_mpi_point sG, tP; - MPI t = NULL; - MPI x1 = NULL, y1 = NULL; - - mpi_point_init(&sG); - mpi_point_init(&tP); - x1 = mpi_new(0); - y1 = mpi_new(0); - t = mpi_new(0); - - /* r, s in [1, n-1] */ - if (mpi_cmp_ui(sig_r, 1) < 0 || mpi_cmp(sig_r, ec->n) > 0 || - mpi_cmp_ui(sig_s, 1) < 0 || mpi_cmp(sig_s, ec->n) > 0) { - goto leave; - } - - /* t = (r + s) % n, t == 0 */ - mpi_addm(t, sig_r, sig_s, ec->n); - if (mpi_cmp_ui(t, 0) == 0) - goto leave; - - /* sG + tP = (x1, y1) */ - rc = -EBADMSG; - mpi_ec_mul_point(&sG, sig_s, ec->G, ec); - mpi_ec_mul_point(&tP, t, ec->Q, ec); - mpi_ec_add_points(&sG, &sG, &tP, ec); - if (mpi_ec_get_affine(x1, y1, &sG, ec)) - goto leave; - - /* R = (e + x1) % n */ - mpi_addm(t, hash, x1, ec->n); - - /* check R == r */ - rc = -EKEYREJECTED; - if (mpi_cmp(t, sig_r)) - goto leave; - - rc = 0; - -leave: - mpi_point_free_parts(&sG); - mpi_point_free_parts(&tP); - mpi_free(x1); - mpi_free(y1); - mpi_free(t); - - return rc; -} - -static int sm2_verify(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - unsigned char *buffer; - struct sm2_signature_ctx sig; - MPI hash; - int ret; - - if (unlikely(!ec->Q)) - return -EINVAL; - - buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, req->src_len + req->dst_len), - buffer, req->src_len + req->dst_len, 0); - - sig.sig_r = NULL; - sig.sig_s = NULL; - ret = asn1_ber_decoder(&sm2signature_decoder, &sig, - buffer, req->src_len); - if (ret) - goto error; - - ret = -ENOMEM; - hash = mpi_read_raw_data(buffer + req->src_len, req->dst_len); - if (!hash) - goto error; - - ret = _sm2_verify(ec, hash, sig.sig_r, sig.sig_s); - - mpi_free(hash); -error: - mpi_free(sig.sig_r); - mpi_free(sig.sig_s); - kfree(buffer); - return ret; -} - -static int sm2_set_pub_key(struct crypto_akcipher *tfm, - const void *key, unsigned int keylen) -{ - 
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - - return __sm2_set_pub_key(ec, key, keylen); - -} - -static int __sm2_set_pub_key(struct mpi_ec_ctx *ec, - const void *key, unsigned int keylen) -{ - MPI a; - int rc; - - /* include the uncompressed flag '0x04' */ - a = mpi_read_raw_data(key, keylen); - if (!a) - return -ENOMEM; - - mpi_normalize(a); - rc = sm2_ecc_os2ec(ec->Q, a); - mpi_free(a); - - return rc; -} - -static unsigned int sm2_max_size(struct crypto_akcipher *tfm) -{ - /* Unlimited max size */ - return PAGE_SIZE; -} - -static int sm2_init_tfm(struct crypto_akcipher *tfm) -{ - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - - return sm2_ec_ctx_init(ec); -} - -static void sm2_exit_tfm(struct crypto_akcipher *tfm) -{ - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); - - sm2_ec_ctx_deinit(ec); -} - -static struct akcipher_alg sm2 = { - .verify = sm2_verify, - .set_pub_key = sm2_set_pub_key, - .max_size = sm2_max_size, - .init = sm2_init_tfm, - .exit = sm2_exit_tfm, - .base = { - .cra_name = "sm2", - .cra_driver_name = "sm2-generic", - .cra_priority = 100, - .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct mpi_ec_ctx), - }, -}; - -static int __init sm2_init(void) -{ - return crypto_register_akcipher(&sm2); -} - -static void __exit sm2_exit(void) -{ - crypto_unregister_akcipher(&sm2); -} - -subsys_initcall(sm2_init); -module_exit(sm2_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>"); -MODULE_DESCRIPTION("SM2 generic algorithm"); -MODULE_ALIAS_CRYPTO("sm2-generic"); diff --git a/crypto/sm2signature.asn1 b/crypto/sm2signature.asn1 deleted file mode 100644 index ab8c0b754d21..000000000000 --- a/crypto/sm2signature.asn1 +++ /dev/null @@ -1,4 +0,0 @@ -Sm2Signature ::= SEQUENCE { - sig_r INTEGER ({ sm2_get_signature_r }), - sig_s INTEGER ({ sm2_get_signature_s }) -} diff --git a/crypto/sm3.c b/crypto/sm3.c deleted file mode 100644 index d473e358a873..000000000000 --- a/crypto/sm3.c +++ /dev/null @@ -1,246 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described - * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 - * - * Copyright (C) 2017 ARM Limited or its affiliates. - * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> - * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <asm/unaligned.h> -#include <crypto/sm3.h> - -static const u32 ____cacheline_aligned K[64] = { - 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, - 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, - 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, - 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, - 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, - 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, - 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, - 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, - 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, - 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, - 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, - 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, - 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, - 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, - 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, - 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 -}; - -/* - * Transform the message X which consists of 16 32-bit-words. See - * GM/T 004-2012 for details. 
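/*
 * Recap of the round function defined by the R() macro below, where
 * j selects FF1/GG1 for rounds 0-15 and FF2/GG2 afterwards, t is the
 * round constant and w1/w2 are expanded message words:
 *
 *   ss1 = rol32(rol32(a, 12) + e + t, 7)
 *   ss2 = ss1 ^ rol32(a, 12)
 *   d  += FFj(a, b, c) + ss2 + (w1 ^ w2)
 *   h  += GGj(e, f, g) + ss1 + w1
 *   b = rol32(b, 9);  f = rol32(f, 19);  h = P0(h)
 */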
- */ -#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ - do { \ - ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ - ss2 = ss1 ^ rol32((a), 12); \ - d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ - h += GG ## i(e, f, g) + ss1 + (w1); \ - b = rol32((b), 9); \ - f = rol32((f), 19); \ - h = P0((h)); \ - } while (0) - -#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ - R(1, a, b, c, d, e, f, g, h, t, w1, w2) -#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ - R(2, a, b, c, d, e, f, g, h, t, w1, w2) - -#define FF1(x, y, z) (x ^ y ^ z) -#define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) - -#define GG1(x, y, z) FF1(x, y, z) -#define GG2(x, y, z) ((x & y) | (~x & z)) - -/* Message expansion */ -#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) -#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) -#define I(i) (W[i] = get_unaligned_be32(data + i * 4)) -#define W1(i) (W[i & 0x0f]) -#define W2(i) (W[i & 0x0f] = \ - P1(W[i & 0x0f] \ - ^ W[(i-9) & 0x0f] \ - ^ rol32(W[(i-3) & 0x0f], 15)) \ - ^ rol32(W[(i-13) & 0x0f], 7) \ - ^ W[(i-6) & 0x0f]) - -static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) -{ - u32 a, b, c, d, e, f, g, h, ss1, ss2; - - a = sctx->state[0]; - b = sctx->state[1]; - c = sctx->state[2]; - d = sctx->state[3]; - e = sctx->state[4]; - f = sctx->state[5]; - g = sctx->state[6]; - h = sctx->state[7]; - - R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); - R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); - R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); - R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); - R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); - R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); - R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); - R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); - R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); - R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); - R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); - R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); - R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); - R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); - R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); - R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); - - R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); - R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); - R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); - R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); - R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); - R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); - R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); - R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); - R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); - R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); - R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); - R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); - R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); - R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); - R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); - R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); - - R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); - R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); - R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); - R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); - R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); - R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); - R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); - R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); - R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); - R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45)); - R2(c, d, a, 
b, g, h, e, f, K[42], W1(42), W2(46)); - R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); - R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); - R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); - R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); - R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); - - R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); - R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); - R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); - R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); - R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); - R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); - R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); - R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); - R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); - R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); - R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); - R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); - R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); - R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); - R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); - R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); - - sctx->state[0] ^= a; - sctx->state[1] ^= b; - sctx->state[2] ^= c; - sctx->state[3] ^= d; - sctx->state[4] ^= e; - sctx->state[5] ^= f; - sctx->state[6] ^= g; - sctx->state[7] ^= h; -} -#undef R -#undef R1 -#undef R2 -#undef I -#undef W1 -#undef W2 - -static inline void sm3_block(struct sm3_state *sctx, - u8 const *data, int blocks, u32 W[16]) -{ - while (blocks--) { - sm3_transform(sctx, data, W); - data += SM3_BLOCK_SIZE; - } -} - -void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) -{ - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - - sctx->count += len; - - if ((partial + len) >= SM3_BLOCK_SIZE) { - int blocks; - - if (partial) { - int p = SM3_BLOCK_SIZE - partial; - - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - blocks = len / SM3_BLOCK_SIZE; - len %= SM3_BLOCK_SIZE; - - if (blocks) { - sm3_block(sctx, data, blocks, W); - data += blocks * SM3_BLOCK_SIZE; - } - - memzero_explicit(W, sizeof(W)); - - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); -} -EXPORT_SYMBOL_GPL(sm3_update); - -void sm3_final(struct sm3_state *sctx, u8 *out) -{ - const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); - __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); - __be32 *digest = (__be32 *)out; - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - int i; - - sctx->buffer[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); - partial = 0; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - memset(sctx->buffer + partial, 0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - sm3_block(sctx, sctx->buffer, 1, W); - - for (i = 0; i < 8; i++) - put_unaligned_be32(sctx->state[i], digest++); - - /* Zeroize sensitive information. 
*/ - memzero_explicit(W, sizeof(W)); - memzero_explicit(sctx, sizeof(*sctx)); -} -EXPORT_SYMBOL_GPL(sm3_final); - -MODULE_DESCRIPTION("Generic SM3 library"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index a215c1c37e73..7529139fcc96 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -9,15 +9,10 @@ */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sm3.h> #include <crypto/sm3_base.h> -#include <linux/bitops.h> -#include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, @@ -30,38 +25,28 @@ EXPORT_SYMBOL_GPL(sm3_zero_message_hash); static int crypto_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - sm3_update(shash_desc_ctx(desc), data, len); - return 0; -} - -static int crypto_sm3_final(struct shash_desc *desc, u8 *out) -{ - sm3_final(shash_desc_ctx(desc), out); - return 0; + return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic); } static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - if (len) - sm3_update(sctx, data, len); - sm3_final(sctx, hash); - return 0; + sm3_base_do_finup(desc, data, len, sm3_block_generic); + return sm3_base_finish(desc, hash); } static struct shash_alg sm3_alg = { .digestsize = SM3_DIGEST_SIZE, .init = sm3_base_init, .update = crypto_sm3_update, - .final = crypto_sm3_final, .finup = crypto_sm3_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .base = { .cra_name = "sm3", .cra_driver_name = "sm3-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SM3_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -77,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void) crypto_unregister_shash(&sm3_alg); } -subsys_initcall(sm3_generic_mod_init); +module_init(sm3_generic_mod_init); module_exit(sm3_generic_mod_fini); MODULE_LICENSE("GPL v2"); diff --git a/crypto/sm4.c b/crypto/sm4.c index 2c44193bc27e..f4cd7edc11f0 100644 --- a/crypto/sm4.c +++ b/crypto/sm4.c @@ -8,7 +8,7 @@ */ #include <linux/module.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/sm4.h> static const u32 ____cacheline_aligned fk[4] = { diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c index 560eba37dc55..d57444e8428c 100644 --- a/crypto/sm4_generic.c +++ b/crypto/sm4_generic.c @@ -14,7 +14,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> /** * sm4_setkey - Set the SM4 key. 
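/*
 * Pattern note for the hash conversions in this series (sm3_generic
 * above, streebog below): with CRYPTO_AHASH_ALG_BLOCK_ONLY the core
 * buffers partial blocks, so ->update() is only ever called with at
 * least one full block and returns the leftover byte count, and
 * ->finup() receives the final partial block.  Minimal sketch with
 * hypothetical "demo" names:
 */
static int demo_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	struct demo_state *st = shash_desc_ctx(desc);

	do {
		demo_compress(st, data);	/* one full block */
		data += DEMO_BLOCK_SIZE;
		len -= DEMO_BLOCK_SIZE;
	} while (len >= DEMO_BLOCK_SIZE);

	return len;	/* 0..DEMO_BLOCK_SIZE-1 bytes kept by the core */
}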
@@ -83,7 +83,7 @@ static void __exit sm4_fini(void) crypto_unregister_alg(&sm4_alg); } -subsys_initcall(sm4_init); +module_init(sm4_init); module_exit(sm4_fini); MODULE_DESCRIPTION("SM4 Cipher Algorithm"); diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c index dc625ffc54ad..57bbf70f4c22 100644 --- a/crypto/streebog_generic.c +++ b/crypto/streebog_generic.c @@ -13,9 +13,10 @@ */ #include <crypto/internal/hash.h> -#include <linux/module.h> -#include <linux/crypto.h> #include <crypto/streebog.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> static const struct streebog_uint512 buffer0 = { { 0, 0, 0, 0, 0, 0, 0, 0 @@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc) return 0; } -static void streebog_pad(struct streebog_state *ctx) -{ - if (ctx->fillsize >= STREEBOG_BLOCK_SIZE) - return; - - memset(ctx->buffer + ctx->fillsize, 0, - sizeof(ctx->buffer) - ctx->fillsize); - - ctx->buffer[ctx->fillsize] = 1; -} - static void streebog_add512(const struct streebog_uint512 *x, const struct streebog_uint512 *y, struct streebog_uint512 *r) @@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data) streebog_add512(&ctx->Sigma, &m, &ctx->Sigma); } -static void streebog_stage3(struct streebog_state *ctx) +static void streebog_stage3(struct streebog_state *ctx, const u8 *src, + unsigned int len) { struct streebog_uint512 buf = { { 0 } }; + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + } u = {}; - buf.qword[0] = cpu_to_le64(ctx->fillsize << 3); - streebog_pad(ctx); + buf.qword[0] = cpu_to_le64(len << 3); + memcpy(u.buffer, src, len); + u.buffer[len] = 1; - streebog_g(&ctx->h, &ctx->N, &ctx->m); + streebog_g(&ctx->h, &ctx->N, &u.m); streebog_add512(&ctx->N, &buf, &ctx->N); - streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma); + streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma); + memzero_explicit(&u, sizeof(u)); streebog_g(&ctx->h, &buffer0, &ctx->N); streebog_g(&ctx->h, &buffer0, &ctx->Sigma); memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512)); @@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct streebog_state *ctx = shash_desc_ctx(desc); - size_t chunksize; - if (ctx->fillsize) { - chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize; - if (chunksize > len) - chunksize = len; - memcpy(&ctx->buffer[ctx->fillsize], data, chunksize); - ctx->fillsize += chunksize; - len -= chunksize; - data += chunksize; - - if (ctx->fillsize == STREEBOG_BLOCK_SIZE) { - streebog_stage2(ctx, ctx->buffer); - ctx->fillsize = 0; - } - } - - while (len >= STREEBOG_BLOCK_SIZE) { + do { streebog_stage2(ctx, data); data += STREEBOG_BLOCK_SIZE; len -= STREEBOG_BLOCK_SIZE; - } + } while (len >= STREEBOG_BLOCK_SIZE); - if (len) { - memcpy(&ctx->buffer, data, len); - ctx->fillsize = len; - } - return 0; + return len; } -static int streebog_final(struct shash_desc *desc, u8 *digest) +static int streebog_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *digest) { struct streebog_state *ctx = shash_desc_ctx(desc); - streebog_stage3(ctx); - ctx->fillsize = 0; + streebog_stage3(ctx, src, len); if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE) memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE); else @@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { { .digestsize = STREEBOG256_DIGEST_SIZE, .init = streebog_init, .update = streebog_update, - .final = streebog_final, + .finup = streebog_finup, 
.descsize = sizeof(struct streebog_state), .base = { .cra_name = "streebog256", .cra_driver_name = "streebog256-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = STREEBOG_BLOCK_SIZE, .cra_module = THIS_MODULE, }, @@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { { .digestsize = STREEBOG512_DIGEST_SIZE, .init = streebog_init, .update = streebog_update, - .final = streebog_final, + .finup = streebog_finup, .descsize = sizeof(struct streebog_state), .base = { .cra_name = "streebog512", .cra_driver_name = "streebog512-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = STREEBOG_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } -subsys_initcall(streebog_mod_init); +module_init(streebog_mod_init); module_exit(streebog_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8aea416f6480..d1d88debbd71 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Quick & dirty crypto testing module. + * Quick & dirty crypto benchmarking module. * - * This will only exist until we have a better testing mechanism + * This will only exist until we have a better benchmarking mechanism * (e.g. a char device). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> @@ -39,7 +39,7 @@ #include "tcrypt.h" /* - * Need slab memory for testing (size in number of pages). + * Need slab memory for benchmarking (size in number of pages). */ #define TVMEMSIZE 4 @@ -1654,10 +1654,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret = min(ret, tcrypt_test("ghash")); break; - case 47: - ret = min(ret, tcrypt_test("crct10dif")); - break; - case 48: ret = min(ret, tcrypt_test("sha3-224")); break; @@ -1738,10 +1734,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret = min(ret, tcrypt_test("hmac(rmd160)")); break; - case 109: - ret = min(ret, tcrypt_test("vmac64(aes)")); - break; - case 111: ret = min(ret, tcrypt_test("hmac(sha3-224)")); break; @@ -2276,10 +2268,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 320: - test_hash_speed("crct10dif", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 321: test_hash_speed("poly1305", sec, poly1305_speed_template); if (mode > 300 && mode < 400) break; @@ -2613,6 +2601,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) break; case 600: + if (alg) { + u8 speed_template[2] = {klen, 0}; + test_mb_skcipher_speed(alg, ENCRYPT, sec, NULL, 0, + speed_template, num_mb); + test_mb_skcipher_speed(alg, DECRYPT, sec, NULL, 0, + speed_template, num_mb); + break; + } + test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32, num_mb); test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, @@ -2871,5 +2868,5 @@ module_param(klen, uint, 0); MODULE_PARM_DESC(klen, "Key length (defaults to 0)"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Quick & dirty crypto testing module"); +MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 96c843a24607..7f938ac93e58 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -1,8 +1,8 @@ 
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Quick & dirty crypto testing module. + * Quick & dirty crypto benchmarking module. * - * This will only exist until we have a better testing mechanism + * This will only exist until we have a better benchmarking mechanism * (e.g. a char device). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> diff --git a/crypto/tea.c b/crypto/tea.c index 896f863f3067..cb05140e3470 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -18,7 +18,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #include <linux/types.h> #define TEA_KEY_SIZE 16 @@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *key = (const __le32 *)in_key; - ctx->KEY[0] = le32_to_cpu(key[0]); - ctx->KEY[1] = le32_to_cpu(key[1]); - ctx->KEY[2] = le32_to_cpu(key[2]); - ctx->KEY[3] = le32_to_cpu(key[3]); + ctx->KEY[0] = get_unaligned_le32(&in_key[0]); + ctx->KEY[1] = get_unaligned_le32(&in_key[4]); + ctx->KEY[2] = get_unaligned_le32(&in_key[8]); + ctx->KEY[3] = get_unaligned_le32(&in_key[12]); return 0; @@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, n, sum = 0; u32 k0, k1, k2, k3; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); k0 = ctx->KEY[0]; k1 = ctx->KEY[1]; @@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, n, sum; u32 k0, k1, k2, k3; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); k0 = ctx->KEY[0]; k1 = ctx->KEY[1]; @@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) sum -= TEA_DELTA; } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *key = (const __le32 *)in_key; - ctx->KEY[0] = le32_to_cpu(key[0]); - ctx->KEY[1] = le32_to_cpu(key[1]); - ctx->KEY[2] = le32_to_cpu(key[2]); - ctx->KEY[3] = le32_to_cpu(key[3]); + ctx->KEY[0] = get_unaligned_le32(&in_key[0]); + ctx->KEY[1] = get_unaligned_le32(&in_key[4]); + ctx->KEY[2] = get_unaligned_le32(&in_key[8]); + ctx->KEY[3] = get_unaligned_le32(&in_key[12]); return 0; @@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); while (sum != 
limit) { y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); @@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); sum = XTEA_DELTA * XTEA_ROUNDS; @@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } @@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); while (sum != limit) { y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; @@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *in = (const __le32 *)src; - __le32 *out = (__le32 *)dst; - y = le32_to_cpu(in[0]); - z = le32_to_cpu(in[1]); + y = get_unaligned_le32(&src[0]); + z = get_unaligned_le32(&src[4]); sum = XTEA_DELTA * XTEA_ROUNDS; @@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; } - out[0] = cpu_to_le32(y); - out[1] = cpu_to_le32(z); + put_unaligned_le32(y, &dst[0]); + put_unaligned_le32(z, &dst[4]); } static struct crypto_alg tea_algs[3] = { { @@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = TEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct tea_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = TEA_KEY_SIZE, @@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = XTEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct xtea_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = XTEA_KEY_SIZE, @@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = XTEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct xtea_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = XTEA_KEY_SIZE, @@ -272,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea"); MODULE_ALIAS_CRYPTO("xtea"); MODULE_ALIAS_CRYPTO("xeta"); -subsys_initcall(tea_mod_init); +module_init(tea_mod_init); module_exit(tea_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 3dddd288ca02..32f753d6c430 100644 
--- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -23,7 +23,7 @@ #include <linux/fips.h> #include <linux/module.h> #include <linux/once.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> @@ -33,31 +33,32 @@ #include <crypto/akcipher.h> #include <crypto/kpp.h> #include <crypto/acompress.h> +#include <crypto/sig.h> #include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> #include "internal.h" -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); static bool notests; module_param(notests, bool, 0644); -MODULE_PARM_DESC(notests, "disable crypto self-tests"); +MODULE_PARM_DESC(notests, "disable all crypto self-tests"); -static bool panic_on_fail; -module_param(panic_on_fail, bool, 0444); - -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS -static bool noextratests; -module_param(noextratests, bool, 0644); -MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests"); +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL +static bool noslowtests; +module_param(noslowtests, bool, 0644); +MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests"); static unsigned int fuzz_iterations = 100; module_param(fuzz_iterations, uint, 0644); MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations"); +#else +#define noslowtests 1 +#define fuzz_iterations 0 #endif -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifndef CONFIG_CRYPTO_SELFTESTS /* a perfect nop */ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) @@ -131,6 +132,11 @@ struct akcipher_test_suite { unsigned int count; }; +struct sig_test_suite { + const struct sig_testvec *vecs; + unsigned int count; +}; + struct kpp_test_suite { const struct kpp_testvec *vecs; unsigned int count; @@ -151,6 +157,7 @@ struct alg_test_desc { struct cprng_test_suite cprng; struct drbg_test_suite drbg; struct akcipher_test_suite akcipher; + struct sig_test_suite sig; struct kpp_test_suite kpp; } suite; }; @@ -293,6 +300,10 @@ struct test_sg_division { * the @key_offset * @finalization_type: what finalization function to use for hashes * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP. + * This applies to the parts of the operation that aren't controlled + * individually by @nosimd_setkey or @src_divs[].nosimd. + * @nosimd_setkey: set the key (if applicable) with SIMD disabled? Requires + * !CRYPTO_TFM_REQ_MAY_SLEEP. */ struct testvec_config { const char *name; @@ -306,16 +317,16 @@ struct testvec_config { bool key_offset_relative_to_alignmask; enum finalization_type finalization_type; bool nosimd; + bool nosimd_setkey; }; #define TESTVEC_CONFIG_NAMELEN 192 /* * The following are the lists of testvec_configs to test for each algorithm - * type when the basic crypto self-tests are enabled, i.e. when - * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test - * coverage, while keeping the test time much shorter than the full fuzz tests - * so that the basic tests can be enabled in a wider range of circumstances. + * type when the "fast" crypto self-tests are enabled. They aim to provide good + * test coverage, while keeping the test time much shorter than the "full" tests + * so that the "fast" tests can be enabled in a wider range of circumstances. 
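/*
 * The renamed knobs as they are used in this file (Kconfig spelling
 * taken from the #ifdefs above):
 *
 *   CONFIG_CRYPTO_SELFTESTS       - build the "fast" self-tests;
 *                                   without it alg_test() is a nop
 *   CONFIG_CRYPTO_SELFTESTS_FULL  - also build the slow/fuzz tests,
 *                                   tunable at runtime through the
 *                                   noslowtests= and fuzz_iterations=
 *                                   module parameters
 *
 * notests=1 continues to disable all of them.
 */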
*/ /* Configs for skciphers and aeads */ @@ -533,7 +544,8 @@ static bool valid_testvec_config(const struct testvec_config *cfg) cfg->finalization_type == FINALIZATION_TYPE_DIGEST) return false; - if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) && + if ((cfg->nosimd || cfg->nosimd_setkey || + (flags & SGDIVS_HAVE_NOSIMD)) && (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) return false; @@ -841,7 +853,10 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize, return 0; } -/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */ +/* + * Like setkey_f(tfm, key, ksize), but sometimes misalign the key. + * In addition, run the setkey function in no-SIMD context if requested. + */ #define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \ ({ \ const u8 *keybuf, *keyptr; \ @@ -850,14 +865,16 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize, err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \ &keybuf, &keyptr); \ if (err == 0) { \ + if ((cfg)->nosimd_setkey) \ + crypto_disable_simd_for_test(); \ err = setkey_f((tfm), keyptr, (ksize)); \ + if ((cfg)->nosimd_setkey) \ + crypto_reenable_simd_for_test(); \ kfree(keybuf); \ } \ err; \ }) -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - /* * The fuzz tests use prandom instead of the normal Linux RNG since they don't * need cryptographically secure random numbers. This greatly improves the @@ -903,14 +920,20 @@ static unsigned int generate_random_length(struct rnd_state *rng, switch (prandom_u32_below(rng, 4)) { case 0: - return len % 64; + len %= 64; + break; case 1: - return len % 256; + len %= 256; + break; case 2: - return len % 1024; + len %= 1024; + break; default: - return len; + break; } + if (len && prandom_u32_below(rng, 4) == 0) + len = rounddown_pow_of_two(len); + return len; } /* Flip a random bit in the given nonempty data buffer */ @@ -1006,6 +1029,8 @@ static char *generate_random_sgl_divisions(struct rnd_state *rng, if (div == &divs[max_divs - 1] || prandom_bool(rng)) this_len = remaining; + else if (prandom_u32_below(rng, 4) == 0) + this_len = (remaining + 1) / 2; else this_len = prandom_u32_inclusive(rng, 1, remaining); div->proportion_of_total = this_len; @@ -1118,9 +1143,15 @@ static void generate_random_testvec_config(struct rnd_state *rng, break; } - if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) { - cfg->nosimd = true; - p += scnprintf(p, end - p, " nosimd"); + if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) { + if (prandom_bool(rng)) { + cfg->nosimd = true; + p += scnprintf(p, end - p, " nosimd"); + } + if (prandom_bool(rng)) { + cfg->nosimd_setkey = true; + p += scnprintf(p, end - p, " nosimd_setkey"); + } } p += scnprintf(p, end - p, " src_divs=["); @@ -1157,14 +1188,18 @@ static void generate_random_testvec_config(struct rnd_state *rng, static void crypto_disable_simd_for_test(void) { +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL migrate_disable(); __this_cpu_write(crypto_simd_disabled_for_test, true); +#endif } static void crypto_reenable_simd_for_test(void) { +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL __this_cpu_write(crypto_simd_disabled_for_test, false); migrate_enable(); +#endif } /* @@ -1208,15 +1243,6 @@ too_long: algname); return -ENAMETOOLONG; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static void crypto_disable_simd_for_test(void) -{ -} - -static void crypto_reenable_simd_for_test(void) -{ -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int build_hash_sglist(struct test_sglist *tsgl, const struct hash_testvec *vec, @@ -1657,8 +1683,7 @@ static int 
test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; @@ -1675,17 +1700,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* * Generate a hash test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. */ static void generate_random_hash_testvec(struct rnd_state *rng, - struct shash_desc *desc, + struct ahash_request *req, struct hash_testvec *vec, unsigned int maxkeysize, unsigned int maxdatasize, @@ -1707,16 +1730,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng, vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize); generate_random_bytes(rng, (u8 *)vec->key, vec->ksize); - vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, - vec->ksize); + vec->setkey_error = crypto_ahash_setkey( + crypto_ahash_reqtfm(req), vec->key, vec->ksize); /* If the key couldn't be set, no need to continue to digest. */ if (vec->setkey_error) goto done; } /* Digest */ - vec->digest_error = crypto_shash_digest(desc, vec->plaintext, - vec->psize, (u8 *)vec->digest); + vec->digest_error = crypto_hash_digest( + crypto_ahash_reqtfm(req), vec->plaintext, + vec->psize, (u8 *)vec->digest); done: snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"", vec->psize, vec->ksize); @@ -1741,8 +1765,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver, const char *driver = crypto_ahash_driver_name(tfm); struct rnd_state rng; char _generic_driver[CRYPTO_MAX_ALG_NAME]; - struct crypto_shash *generic_tfm = NULL; - struct shash_desc *generic_desc = NULL; + struct ahash_request *generic_req = NULL; + struct crypto_ahash *generic_tfm = NULL; unsigned int i; struct hash_testvec vec = { 0 }; char vec_name[64]; @@ -1750,7 +1774,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) + if (noslowtests) return 0; init_rnd_state(&rng); @@ -1765,7 +1789,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */ return 0; - generic_tfm = crypto_alloc_shash(generic_driver, 0, 0); + generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0); if (IS_ERR(generic_tfm)) { err = PTR_ERR(generic_tfm); if (err == -ENOENT) { @@ -1784,27 +1808,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver, goto out; } - generic_desc = kzalloc(sizeof(*desc) + - crypto_shash_descsize(generic_tfm), GFP_KERNEL); - if (!generic_desc) { + generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL); + if (!generic_req) { err = -ENOMEM; goto out; } - generic_desc->tfm = generic_tfm; /* Check the algorithm properties for consistency. 
*/ - if (digestsize != crypto_shash_digestsize(generic_tfm)) { + if (digestsize != crypto_ahash_digestsize(generic_tfm)) { pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n", driver, digestsize, - crypto_shash_digestsize(generic_tfm)); + crypto_ahash_digestsize(generic_tfm)); err = -EINVAL; goto out; } - if (blocksize != crypto_shash_blocksize(generic_tfm)) { + if (blocksize != crypto_ahash_blocksize(generic_tfm)) { pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n", - driver, blocksize, crypto_shash_blocksize(generic_tfm)); + driver, blocksize, crypto_ahash_blocksize(generic_tfm)); err = -EINVAL; goto out; } @@ -1823,7 +1845,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, } for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_hash_testvec(&rng, generic_desc, &vec, + generate_random_hash_testvec(&rng, generic_req, &vec, maxkeysize, maxdatasize, vec_name, sizeof(vec_name)); generate_random_testvec_config(&rng, cfg, cfgname, @@ -1841,21 +1863,10 @@ out: kfree(vec.key); kfree(vec.plaintext); kfree(vec.digest); - crypto_free_shash(generic_tfm); - kfree_sensitive(generic_desc); + ahash_request_free(generic_req); + crypto_free_ahash(generic_tfm); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_hash_vs_generic_impl(const char *generic_driver, - unsigned int maxkeysize, - struct ahash_request *req, - struct shash_desc *desc, - struct test_sglist *tsgl, - u8 *hashstate) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int alloc_shash(const char *driver, u32 type, u32 mask, struct crypto_shash **tfm_ret, @@ -1866,7 +1877,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask, tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { - if (PTR_ERR(tfm) == -ENOENT) { + if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) { /* * This algorithm is only available through the ahash * API, not the shash API, so skip the shash tests. @@ -1912,6 +1923,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs, atfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(atfm)) { + if (PTR_ERR(atfm) == -ENOENT) + return 0; pr_err("alg: hash: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(atfm)); return PTR_ERR(atfm); @@ -2227,8 +2240,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; @@ -2245,13 +2257,10 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - -struct aead_extra_tests_ctx { +struct aead_slow_tests_ctx { struct rnd_state rng; struct aead_request *req; struct crypto_aead *tfm; @@ -2426,8 +2435,7 @@ static void generate_random_aead_testvec(struct rnd_state *rng, vec->alen, vec->plen, authsize, vec->klen, vec->novrfy); } -static void try_to_generate_inauthentic_testvec( - struct aead_extra_tests_ctx *ctx) +static void try_to_generate_inauthentic_testvec(struct aead_slow_tests_ctx *ctx) { int i; @@ -2446,7 +2454,7 @@ static void try_to_generate_inauthentic_testvec( * Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the * result of an encryption with the key) and verify that decryption fails. 
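A pattern repeated throughout this patch: when a transform cannot be allocated because the algorithm simply is not built into the kernel (-ENOENT), the self-test is now skipped rather than reported as a failure; alloc_shash() additionally treats -EEXIST the same way, since such algorithms are reachable only through the ahash API. testmgr.c open-codes the check at each call site, but a helper capturing the convention (the name here is illustrative, not from the patch) would be:

	static int tfm_alloc_err(const char *kind, const char *driver, long err)
	{
		if (err == -ENOENT)
			return 0;	/* algorithm not available: skip, don't fail */
		pr_err("alg: %s: failed to allocate transform for %s: %ld\n",
		       kind, driver, err);
		return err;
	}

A call site then reduces to: if (IS_ERR(tfm)) return tfm_alloc_err("hash", driver, PTR_ERR(tfm));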
*/ -static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) +static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx) { unsigned int i; int err; @@ -2481,7 +2489,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) * Test the AEAD algorithm against the corresponding generic implementation, if * one is available. */ -static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx) +static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx) { struct crypto_aead *tfm = ctx->tfm; const char *algname = crypto_aead_alg(tfm)->base.cra_name; @@ -2585,15 +2593,15 @@ out: return err; } -static int test_aead_extra(const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) +static int test_aead_slow(const struct alg_test_desc *test_desc, + struct aead_request *req, + struct cipher_test_sglists *tsgls) { - struct aead_extra_tests_ctx *ctx; + struct aead_slow_tests_ctx *ctx; unsigned int i; int err; - if (noextratests) + if (noslowtests) return 0; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -2635,14 +2643,6 @@ out: kfree(ctx); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_aead_extra(const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_aead(int enc, const struct aead_test_suite *suite, struct aead_request *req, @@ -2676,6 +2676,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_aead(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: aead: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -2706,7 +2708,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, if (err) goto out; - err = test_aead_extra(desc, req, tsgls); + err = test_aead_slow(desc, req, tsgls); out: free_cipher_test_sglists(tsgls); aead_request_free(req); @@ -2847,18 +2849,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec, if (ivsize) { if (WARN_ON(ivsize > MAX_IVLEN)) return -EINVAL; - if (vec->generates_iv && !enc) - memcpy(iv, vec->iv_out, ivsize); - else if (vec->iv) + if (vec->iv) memcpy(iv, vec->iv, ivsize); else memset(iv, 0, ivsize); } else { - if (vec->generates_iv) { - pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n", - driver, vec_name); - return -EINVAL; - } iv = NULL; } @@ -2987,8 +2982,7 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; @@ -3005,11 +2999,9 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* * Generate a symmetric cipher test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. @@ -3092,11 +3084,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver, char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) - return 0; - - /* Keywrap isn't supported here yet as it handles its IV differently. 
*/ - if (strncmp(algname, "kw(", 3) == 0) + if (noslowtests) return 0; init_rnd_state(&rng); @@ -3212,14 +3200,6 @@ out: skcipher_request_free(generic_req); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_skcipher_vs_generic_impl(const char *generic_driver, - struct skcipher_request *req, - struct cipher_test_sglists *tsgls) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_skcipher(int enc, const struct cipher_test_suite *suite, struct skcipher_request *req, @@ -3253,6 +3233,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_skcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3291,112 +3273,6 @@ out: return err; } -static int test_comp(struct crypto_comp *tfm, - const struct comp_testvec *ctemplate, - const struct comp_testvec *dtemplate, - int ctcount, int dtcount) -{ - const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); - char *output, *decomp_output; - unsigned int i; - int ret; - - output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!output) - return -ENOMEM; - - decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!decomp_output) { - kfree(output); - return -ENOMEM; - } - - for (i = 0; i < ctcount; i++) { - int ilen; - unsigned int dlen = COMP_BUF_SIZE; - - memset(output, 0, COMP_BUF_SIZE); - memset(decomp_output, 0, COMP_BUF_SIZE); - - ilen = ctemplate[i].inlen; - ret = crypto_comp_compress(tfm, ctemplate[i].input, - ilen, output, &dlen); - if (ret) { - printk(KERN_ERR "alg: comp: compression failed " - "on test %d for %s: ret=%d\n", i + 1, algo, - -ret); - goto out; - } - - ilen = dlen; - dlen = COMP_BUF_SIZE; - ret = crypto_comp_decompress(tfm, output, - ilen, decomp_output, &dlen); - if (ret) { - pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n", - i + 1, algo, -ret); - goto out; - } - - if (dlen != ctemplate[i].inlen) { - printk(KERN_ERR "alg: comp: Compression test %d " - "failed for %s: output len = %d\n", i + 1, algo, - dlen); - ret = -EINVAL; - goto out; - } - - if (memcmp(decomp_output, ctemplate[i].input, - ctemplate[i].inlen)) { - pr_err("alg: comp: compression failed: output differs: on test %d for %s\n", - i + 1, algo); - hexdump(decomp_output, dlen); - ret = -EINVAL; - goto out; - } - } - - for (i = 0; i < dtcount; i++) { - int ilen; - unsigned int dlen = COMP_BUF_SIZE; - - memset(decomp_output, 0, COMP_BUF_SIZE); - - ilen = dtemplate[i].inlen; - ret = crypto_comp_decompress(tfm, dtemplate[i].input, - ilen, decomp_output, &dlen); - if (ret) { - printk(KERN_ERR "alg: comp: decompression failed " - "on test %d for %s: ret=%d\n", i + 1, algo, - -ret); - goto out; - } - - if (dlen != dtemplate[i].outlen) { - printk(KERN_ERR "alg: comp: Decompression test %d " - "failed for %s: output len = %d\n", i + 1, algo, - dlen); - ret = -EINVAL; - goto out; - } - - if (memcmp(decomp_output, dtemplate[i].output, dlen)) { - printk(KERN_ERR "alg: comp: Decompression test %d " - "failed for %s\n", i + 1, algo); - hexdump(decomp_output, dlen); - ret = -EINVAL; - goto out; - } - } - - ret = 0; - -out: - kfree(decomp_output); - kfree(output); - return ret; -} - static int test_acomp(struct crypto_acomp *tfm, const struct comp_testvec *ctemplate, const struct comp_testvec *dtemplate, @@ -3493,21 +3369,6 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } -#ifdef 
CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - crypto_init_wait(&wait); - sg_init_one(&src, input_vec, ilen); - acomp_request_set_params(req, &src, NULL, ilen, 0); - - ret = crypto_wait_req(crypto_acomp_compress(req), &wait); - if (ret) { - pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n", - i + 1, algo, -ret); - kfree(input_vec); - acomp_request_free(req); - goto out; - } -#endif - kfree(input_vec); acomp_request_free(req); } @@ -3569,20 +3430,6 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - crypto_init_wait(&wait); - acomp_request_set_params(req, &src, NULL, ilen, 0); - - ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); - if (ret) { - pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n", - i + 1, algo, -ret); - kfree(input_vec); - acomp_request_free(req); - goto out; - } -#endif - kfree(input_vec); acomp_request_free(req); } @@ -3666,6 +3513,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc, tfm = crypto_alloc_cipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3682,38 +3531,22 @@ static int alg_test_cipher(const struct alg_test_desc *desc, static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { - struct crypto_comp *comp; struct crypto_acomp *acomp; int err; - u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; - - if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { - acomp = crypto_alloc_acomp(driver, type, mask); - if (IS_ERR(acomp)) { - pr_err("alg: acomp: Failed to load transform for %s: %ld\n", - driver, PTR_ERR(acomp)); - return PTR_ERR(acomp); - } - err = test_acomp(acomp, desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); - crypto_free_acomp(acomp); - } else { - comp = crypto_alloc_comp(driver, type, mask); - if (IS_ERR(comp)) { - pr_err("alg: comp: Failed to load transform for %s: %ld\n", - driver, PTR_ERR(comp)); - return PTR_ERR(comp); - } - - err = test_comp(comp, desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); - crypto_free_comp(comp); - } + acomp = crypto_alloc_acomp(driver, type, mask); + if (IS_ERR(acomp)) { + if (PTR_ERR(acomp) == -ENOENT) + return 0; + pr_err("alg: acomp: Failed to load transform for %s: %ld\n", + driver, PTR_ERR(acomp)); + return PTR_ERR(acomp); + } + err = test_acomp(acomp, desc->suite.comp.comp.vecs, + desc->suite.comp.decomp.vecs, + desc->suite.comp.comp.count, + desc->suite.comp.decomp.count); + crypto_free_acomp(acomp); return err; } @@ -3778,6 +3611,8 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, rng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(rng)) { + if (PTR_ERR(rng) == -ENOENT) + return 0; printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(rng)); return PTR_ERR(rng); @@ -3805,10 +3640,12 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, drng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(drng)) { + kfree_sensitive(buf); + if (PTR_ERR(drng) == -ENOENT) + return 0; printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); - kfree_sensitive(buf); - return -ENOMEM; + return PTR_ERR(drng); } test_data.testentropy = &testentropy; @@ 
-4050,6 +3887,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_kpp(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -4078,11 +3917,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, struct crypto_wait wait; unsigned int out_len_max, out_len = 0; int err = -ENOMEM; - struct scatterlist src, dst, src_tab[3]; - const char *m, *c; - unsigned int m_size, c_size; - const char *op; - u8 *key, *ptr; + struct scatterlist src, dst, src_tab[2]; + const char *c; + unsigned int c_size; if (testmgr_alloc_buf(xbuf)) return err; @@ -4093,92 +3930,53 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, crypto_init_wait(&wait); - key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len, - GFP_KERNEL); - if (!key) - goto free_req; - memcpy(key, vecs->key, vecs->key_len); - ptr = key + vecs->key_len; - ptr = test_pack_u32(ptr, vecs->algo); - ptr = test_pack_u32(ptr, vecs->param_len); - memcpy(ptr, vecs->params, vecs->param_len); - if (vecs->public_key_vec) - err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len); + err = crypto_akcipher_set_pub_key(tfm, vecs->key, + vecs->key_len); else - err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len); + err = crypto_akcipher_set_priv_key(tfm, vecs->key, + vecs->key_len); if (err) - goto free_key; + goto free_req; - /* - * First run test which do not require a private key, such as - * encrypt or verify. - */ + /* First run encrypt test which does not require a private key */ err = -ENOMEM; out_len_max = crypto_akcipher_maxsize(tfm); outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); if (!outbuf_enc) - goto free_key; - - if (!vecs->siggen_sigver_test) { - m = vecs->m; - m_size = vecs->m_size; - c = vecs->c; - c_size = vecs->c_size; - op = "encrypt"; - } else { - /* Swap args so we could keep plaintext (digest) - * in vecs->m, and cooked signature in vecs->c. - */ - m = vecs->c; /* signature */ - m_size = vecs->c_size; - c = vecs->m; /* digest */ - c_size = vecs->m_size; - op = "verify"; - } + goto free_req; + + c = vecs->c; + c_size = vecs->c_size; err = -E2BIG; - if (WARN_ON(m_size > PAGE_SIZE)) + if (WARN_ON(vecs->m_size > PAGE_SIZE)) goto free_all; - memcpy(xbuf[0], m, m_size); + memcpy(xbuf[0], vecs->m, vecs->m_size); - sg_init_table(src_tab, 3); + sg_init_table(src_tab, 2); sg_set_buf(&src_tab[0], xbuf[0], 8); - sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8); - if (vecs->siggen_sigver_test) { - if (WARN_ON(c_size > PAGE_SIZE)) - goto free_all; - memcpy(xbuf[1], c, c_size); - sg_set_buf(&src_tab[2], xbuf[1], c_size); - akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size); - } else { - sg_init_one(&dst, outbuf_enc, out_len_max); - akcipher_request_set_crypt(req, src_tab, &dst, m_size, - out_len_max); - } + sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8); + sg_init_one(&dst, outbuf_enc, out_len_max); + akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, + out_len_max); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); - err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature verification */ - crypto_akcipher_verify(req) : - /* Run asymmetric encrypt */ - crypto_akcipher_encrypt(req), &wait); + err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait); if (err) { - pr_err("alg: akcipher: %s test failed. 
err %d\n", op, err); + pr_err("alg: akcipher: encrypt test failed. err %d\n", err); goto free_all; } - if (!vecs->siggen_sigver_test && c) { + if (c) { if (req->dst_len != c_size) { - pr_err("alg: akcipher: %s test failed. Invalid output len\n", - op); + pr_err("alg: akcipher: encrypt test failed. Invalid output len\n"); err = -EINVAL; goto free_all; } /* verify that encrypted message is equal to expected */ if (memcmp(c, outbuf_enc, c_size) != 0) { - pr_err("alg: akcipher: %s test failed. Invalid output\n", - op); + pr_err("alg: akcipher: encrypt test failed. Invalid output\n"); hexdump(outbuf_enc, c_size); err = -EINVAL; goto free_all; @@ -4186,7 +3984,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, } /* - * Don't invoke (decrypt or sign) test which require a private key + * Don't invoke decrypt test which requires a private key * for vectors with only a public key. */ if (vecs->public_key_vec) { @@ -4199,13 +3997,12 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, goto free_all; } - if (!vecs->siggen_sigver_test && !c) { + if (!c) { c = outbuf_enc; c_size = req->dst_len; } err = -E2BIG; - op = vecs->siggen_sigver_test ? "sign" : "decrypt"; if (WARN_ON(c_size > PAGE_SIZE)) goto free_all; memcpy(xbuf[0], c, c_size); @@ -4215,34 +4012,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, crypto_init_wait(&wait); akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max); - err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature generation */ - crypto_akcipher_sign(req) : - /* Run asymmetric decrypt */ - crypto_akcipher_decrypt(req), &wait); + err = crypto_wait_req(crypto_akcipher_decrypt(req), &wait); if (err) { - pr_err("alg: akcipher: %s test failed. err %d\n", op, err); + pr_err("alg: akcipher: decrypt test failed. err %d\n", err); goto free_all; } out_len = req->dst_len; - if (out_len < m_size) { - pr_err("alg: akcipher: %s test failed. Invalid output len %u\n", - op, out_len); + if (out_len < vecs->m_size) { + pr_err("alg: akcipher: decrypt test failed. Invalid output len %u\n", + out_len); err = -EINVAL; goto free_all; } /* verify that decrypted message is equal to the original msg */ - if (memchr_inv(outbuf_dec, 0, out_len - m_size) || - memcmp(m, outbuf_dec + out_len - m_size, m_size)) { - pr_err("alg: akcipher: %s test failed. Invalid output\n", op); + if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) || + memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size, + vecs->m_size)) { + pr_err("alg: akcipher: decrypt test failed. 
Invalid output\n"); hexdump(outbuf_dec, out_len); err = -EINVAL; } free_all: kfree(outbuf_dec); kfree(outbuf_enc); -free_key: - kfree(key); free_req: akcipher_request_free(req); free_xbuf: @@ -4278,6 +4070,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_akcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return 0; pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -4290,6 +4084,114 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, return err; } +static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs) +{ + u8 *ptr, *key __free(kfree); + int err, sig_size; + + key = kmalloc(vecs->key_len + 2 * sizeof(u32) + vecs->param_len, + GFP_KERNEL); + if (!key) + return -ENOMEM; + + /* ecrdsa expects additional parameters appended to the key */ + memcpy(key, vecs->key, vecs->key_len); + ptr = key + vecs->key_len; + ptr = test_pack_u32(ptr, vecs->algo); + ptr = test_pack_u32(ptr, vecs->param_len); + memcpy(ptr, vecs->params, vecs->param_len); + + if (vecs->public_key_vec) + err = crypto_sig_set_pubkey(tfm, key, vecs->key_len); + else + err = crypto_sig_set_privkey(tfm, key, vecs->key_len); + if (err) + return err; + + /* + * Run asymmetric signature verification first + * (which does not require a private key) + */ + err = crypto_sig_verify(tfm, vecs->c, vecs->c_size, + vecs->m, vecs->m_size); + if (err) { + pr_err("alg: sig: verify test failed: err %d\n", err); + return err; + } + + /* + * Don't invoke sign test (which requires a private key) + * for vectors with only a public key. + */ + if (vecs->public_key_vec) + return 0; + + sig_size = crypto_sig_maxsize(tfm); + if (sig_size < vecs->c_size) { + pr_err("alg: sig: invalid maxsize %u\n", sig_size); + return -EINVAL; + } + + u8 *sig __free(kfree) = kzalloc(sig_size, GFP_KERNEL); + if (!sig) + return -ENOMEM; + + /* Run asymmetric signature generation */ + err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size); + if (err < 0) { + pr_err("alg: sig: sign test failed: err %d\n", err); + return err; + } + + /* Verify that generated signature equals cooked signature */ + if (err != vecs->c_size || + memcmp(sig, vecs->c, vecs->c_size) || + memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) { + pr_err("alg: sig: sign test failed: invalid output\n"); + hexdump(sig, sig_size); + return -EINVAL; + } + + return 0; +} + +static int test_sig(struct crypto_sig *tfm, const char *alg, + const struct sig_testvec *vecs, unsigned int tcount) +{ + const char *algo = crypto_tfm_alg_driver_name(crypto_sig_tfm(tfm)); + int ret, i; + + for (i = 0; i < tcount; i++) { + ret = test_sig_one(tfm, vecs++); + if (ret) { + pr_err("alg: sig: test %d failed for %s: err %d\n", + i + 1, algo, ret); + return ret; + } + } + return 0; +} + +static int alg_test_sig(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + struct crypto_sig *tfm; + int err = 0; + + tfm = crypto_alloc_sig(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: sig: Failed to load tfm for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + if (desc->suite.sig.vecs) + err = test_sig(tfm, desc->alg, desc->suite.sig.vecs, + desc->suite.sig.count); + + crypto_free_sig(tfm); + return err; +} + static int alg_test_null(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -4402,6 +4304,12 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, 
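test_sig_one() above exercises the new crypto_sig API, which takes flat buffers in place of the scatterlist and akcipher_request plumbing the old code needed. A minimal verification-only sketch, using just the calls that appear in the patch:

	static int verify_with_sig_api(const char *alg,
				       const u8 *key, unsigned int key_len,
				       const u8 *sig, unsigned int sig_len,
				       const u8 *digest, unsigned int digest_len)
	{
		struct crypto_sig *tfm;
		int err;

		tfm = crypto_alloc_sig(alg, 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_sig_set_pubkey(tfm, key, key_len);
		if (!err)
			err = crypto_sig_verify(tfm, sig, sig_len,
						digest, digest_len);

		crypto_free_sig(tfm);
		return err;
	}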
.fips_allowed = 1, }, { + .alg = "authenc(hmac(sha256),cts(cbc(aes)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128) + } + }, { .alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))", .test = alg_test_null, .fips_allowed = 1, @@ -4422,6 +4330,12 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { + .alg = "authenc(hmac(sha384),cts(cbc(aes)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192) + } + }, { .alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))", .test = alg_test_null, .fips_allowed = 1, @@ -4640,9 +4554,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(sm4_cmac128_tv_template) } }, { - .alg = "compress_null", - .test = alg_test_null, - }, { .alg = "crc32", .test = alg_test_hash, .fips_allowed = 1, @@ -4657,20 +4568,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(crc32c_tv_template) } }, { - .alg = "crc64-rocksoft", - .test = alg_test_hash, - .fips_allowed = 1, - .suite = { - .hash = __VECS(crc64_rocksoft_tv_template) - } - }, { - .alg = "crct10dif", - .test = alg_test_hash, - .fips_allowed = 1, - .suite = { - .hash = __VECS(crct10dif_tv_template) - } - }, { .alg = "ctr(aes)", .test = alg_test_skcipher, .fips_allowed = 1, @@ -5079,29 +4976,36 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "ecdsa-nist-p192", - .test = alg_test_akcipher, + .test = alg_test_sig, .suite = { - .akcipher = __VECS(ecdsa_nist_p192_tv_template) + .sig = __VECS(ecdsa_nist_p192_tv_template) } }, { .alg = "ecdsa-nist-p256", - .test = alg_test_akcipher, + .test = alg_test_sig, .fips_allowed = 1, .suite = { - .akcipher = __VECS(ecdsa_nist_p256_tv_template) + .sig = __VECS(ecdsa_nist_p256_tv_template) } }, { .alg = "ecdsa-nist-p384", - .test = alg_test_akcipher, + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(ecdsa_nist_p384_tv_template) + } + }, { + .alg = "ecdsa-nist-p521", + .test = alg_test_sig, .fips_allowed = 1, .suite = { - .akcipher = __VECS(ecdsa_nist_p384_tv_template) + .sig = __VECS(ecdsa_nist_p521_tv_template) } }, { .alg = "ecrdsa", - .test = alg_test_akcipher, + .test = alg_test_sig, .suite = { - .akcipher = __VECS(ecrdsa_tv_template) + .sig = __VECS(ecrdsa_tv_template) } }, { .alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)", @@ -5288,12 +5192,9 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_null, }, { - .alg = "kw(aes)", - .test = alg_test_skcipher, - .fips_allowed = 1, - .suite = { - .cipher = __VECS(aes_kw_tv_template) - } + .alg = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .test = alg_test_aead, + .suite.aead = __VECS(krb5_test_camellia_cts_cmac) }, { .alg = "lrw(aes)", .generic_driver = "lrw(ecb(aes-generic))", @@ -5394,48 +5295,70 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(nhpoly1305_tv_template) } }, { + .alg = "p1363(ecdsa-nist-p192)", + .test = alg_test_null, + }, { + .alg = "p1363(ecdsa-nist-p256)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(p1363_ecdsa_nist_p256_tv_template) + } + }, { + .alg = "p1363(ecdsa-nist-p384)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "p1363(ecdsa-nist-p521)", + .test = alg_test_null, + .fips_allowed = 1, + }, { .alg = "pcbc(fcrypt)", .test = alg_test_skcipher, .suite = { .cipher = __VECS(fcrypt_pcbc_tv_template) } }, { - .alg = "pkcs1pad(rsa,sha224)", + .alg = "pkcs1(rsa,none)", + .test = 
alg_test_sig, + .suite = { + .sig = __VECS(pkcs1_rsa_none_tv_template) + } + }, { + .alg = "pkcs1(rsa,sha224)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha256)", - .test = alg_test_akcipher, + .alg = "pkcs1(rsa,sha256)", + .test = alg_test_sig, .fips_allowed = 1, .suite = { - .akcipher = __VECS(pkcs1pad_rsa_tv_template) + .sig = __VECS(pkcs1_rsa_tv_template) } }, { - .alg = "pkcs1pad(rsa,sha3-256)", + .alg = "pkcs1(rsa,sha3-256)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha3-384)", + .alg = "pkcs1(rsa,sha3-384)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha3-512)", + .alg = "pkcs1(rsa,sha3-512)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha384)", + .alg = "pkcs1(rsa,sha384)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha512)", + .alg = "pkcs1(rsa,sha512)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "poly1305", - .test = alg_test_hash, - .suite = { - .hash = __VECS(poly1305_tv_template) - } + .alg = "pkcs1pad(rsa)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "polyval", .test = alg_test_hash, @@ -5583,12 +5506,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(sha512_tv_template) } }, { - .alg = "sm2", - .test = alg_test_akcipher, - .suite = { - .akcipher = __VECS(sm2_tv_template) - } - }, { .alg = "sm3", .test = alg_test_hash, .suite = { @@ -5607,12 +5524,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(streebog512_tv_template) } }, { - .alg = "vmac64(aes)", - .test = alg_test_hash, - .suite = { - .hash = __VECS(vmac64_aes_tv_template) - } - }, { .alg = "wp256", .test = alg_test_hash, .suite = { @@ -5631,6 +5542,33 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(wp512_tv_template) } }, { + .alg = "x962(ecdsa-nist-p192)", + .test = alg_test_sig, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p192_tv_template) + } + }, { + .alg = "x962(ecdsa-nist-p256)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p256_tv_template) + } + }, { + .alg = "x962(ecdsa-nist-p384)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p384_tv_template) + } + }, { + .alg = "x962(ecdsa-nist-p521)", + .test = alg_test_sig, + .fips_allowed = 1, + .suite = { + .sig = __VECS(x962_ecdsa_nist_p521_tv_template) + } + }, { .alg = "xcbc(aes)", .test = alg_test_hash, .suite = { @@ -5778,9 +5716,8 @@ static void testmgr_onetime_init(void) alg_check_test_descs_order(); alg_check_testvec_configs(); -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n"); -#endif + if (!noslowtests) + pr_warn("alg: full crypto tests enabled. This is intended for developer use only.\n"); } static int alg_find_test(const char *alg) @@ -5869,11 +5806,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) test_done: if (rc) { - if (fips_enabled || panic_on_fail) { + if (fips_enabled) { fips_fail_notify(); - panic("alg: self-tests for %s (%s) failed in %s mode!\n", - driver, alg, - fips_enabled ? 
"fips" : "panic_on_fail"); + panic("alg: self-tests for %s (%s) failed in fips mode!\n", + driver, alg); } pr_warn("alg: self-tests for %s using %s failed (rc=%d)", alg, driver, rc); @@ -5918,6 +5854,6 @@ non_fips_alg: return alg_fips_disabled(driver, alg); } -#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ +#endif /* CONFIG_CRYPTO_SELFTESTS */ EXPORT_SYMBOL_GPL(alg_test); diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 986f331a5fc2..32d099ac9e73 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -21,6 +21,7 @@ #define _CRYPTO_TESTMGR_H #include <linux/oid_registry.h> +#include <crypto/internal/ecc.h> #define MAX_IVLEN 32 @@ -58,8 +59,6 @@ struct hash_testvec { * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? * ( e.g. test needs to fail due to a weak key ) * @fips_skip: Skip the test vector in FIPS mode - * @generates_iv: Encryption should ignore the given IV, and output @iv_out. - * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)"). * @setkey_error: Expected error from setkey() * @crypt_error: Expected error from encrypt() and decrypt() */ @@ -73,7 +72,6 @@ struct cipher_testvec { unsigned short klen; unsigned int len; bool fips_skip; - bool generates_iv; int setkey_error; int crypt_error; }; @@ -150,6 +148,16 @@ struct drbg_testvec { struct akcipher_testvec { const unsigned char *key; + const unsigned char *m; + const unsigned char *c; + unsigned int key_len; + unsigned int m_size; + unsigned int c_size; + bool public_key_vec; +}; + +struct sig_testvec { + const unsigned char *key; const unsigned char *params; const unsigned char *m; const unsigned char *c; @@ -158,7 +166,6 @@ struct akcipher_testvec { unsigned int m_size; unsigned int c_size; bool public_key_vec; - bool siggen_sigver_test; enum OID algo; }; @@ -647,26 +654,731 @@ static const struct akcipher_testvec rsa_tv_template[] = { } }; +#ifdef CONFIG_CPU_BIG_ENDIAN +#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \ + 0x##b1, 0x##b2, 0x##b3, 0x##b4, 0x##b5, 0x##b6, 0x##b7, 0x##b8 +#else +#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \ + 0x##b8, 0x##b7, 0x##b6, 0x##b5, 0x##b4, 0x##b3, 0x##b2, 0x##b1 +#endif + /* * ECDSA test vectors. 
*/ -static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { +static const struct sig_testvec ecdsa_nist_p192_tv_template[] = { { - .key = + .key = /* secp192r1(sha1) */ + "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" + "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" + "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" + "\x98", + .key_len = 49, + .m = + "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" + "\x63\x85\xe7\x82", + .m_size = 20, + .c = (const unsigned char[]){ + be64_to_cpua(ad, 59, ad, 88, 27, d6, 92, 6b), + be64_to_cpua(a0, 27, 91, c6, f6, 7f, c3, 09), + be64_to_cpua(ba, e5, 93, 83, 6e, b6, 3b, 63), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(86, 80, 6f, a5, 79, 77, da, d0), + be64_to_cpua(ef, 95, 52, 7b, a0, 0f, e4, 18), + be64_to_cpua(10, 68, 01, 9d, ba, ce, 83, 08), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha224) */ + "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" + "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" + "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" + "\xa3", + .key_len = 49, + .m = + "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40" + "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(83, 7b, 12, e6, b6, 5b, cb, d4), + be64_to_cpua(14, f8, 11, 2b, 55, dc, ae, 37), + be64_to_cpua(5a, 8b, 82, 69, 7e, 8a, 0a, 09), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(a3, e3, 5c, 99, db, 92, 5b, 36), + be64_to_cpua(eb, c3, 92, 0f, 1e, 72, ee, c4), + be64_to_cpua(6a, 14, 4f, 53, 75, c8, 02, 48), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha256) */ + "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae" + "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56" + "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58" + "\x91", + .key_len = 49, + .m = + "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd" + "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(01, 48, fb, 5f, 72, 2a, d4, 8f), + be64_to_cpua(6b, 1a, 58, 56, f1, 8f, f7, fd), + be64_to_cpua(3f, 72, 3f, 1f, 42, d2, 3f, 1d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 
00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(7d, 3a, 97, d9, cd, 1a, 6a, 49), + be64_to_cpua(32, dd, 41, 74, 6a, 51, c7, d9), + be64_to_cpua(b3, 69, 43, fd, 48, 19, 86, cf), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha384) */ + "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7" + "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f" + "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e" + "\x8b", + .key_len = 49, + .m = + "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9" + "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61" + "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(dd, 15, bb, d6, 8c, a7, 03, 78), + be64_to_cpua(cf, 7f, 34, b4, b4, e5, c5, 00), + be64_to_cpua(f0, a3, 38, ce, 2b, f8, 9d, 1a), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(93, 12, 3b, 3b, 28, fb, 6d, e1), + be64_to_cpua(d1, 01, 77, 44, 5d, 53, a4, 7c), + be64_to_cpua(64, bc, 5a, 1f, 82, 96, 61, d7), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha512) */ + "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea" + "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d" + "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11" + "\x57", + .key_len = 49, + .m = + "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23" + "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67" + "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70" + "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(2b, 11, 2d, 1c, b6, 06, c9, 6c), + be64_to_cpua(dd, 3f, 07, 87, 12, a0, d4, ac), + be64_to_cpua(88, 5b, 8f, 59, 43, bf, cf, c6), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(28, 6a, df, 97, fd, 82, 76, 24), + be64_to_cpua(a9, 14, 2a, 5e, f5, e5, fb, 72), + be64_to_cpua(73, b4, 22, 9a, 98, 73, 3c, 83), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 
00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +static const struct sig_testvec ecdsa_nist_p256_tv_template[] = { + { + .key = /* secp256r1(sha1) */ + "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" + "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" + "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" + "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" + "\xaf", + .key_len = 65, + .m = + "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" + "\x0b\xde\x6a\x42", + .m_size = 20, + .c = (const unsigned char[]){ + be64_to_cpua(ee, ca, 6a, 52, 0e, 48, 4d, cc), + be64_to_cpua(f7, d4, ad, 8d, 94, 5a, 69, 89), + be64_to_cpua(cf, d4, e7, b7, f0, 82, 56, 41), + be64_to_cpua(f9, 25, ce, 9f, 3a, a6, 35, 81), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(fb, 9d, 8b, de, d4, 8d, 6f, ad), + be64_to_cpua(f1, 03, 03, f3, 3b, e2, 73, f7), + be64_to_cpua(8a, fa, 54, 93, 29, a7, 70, 86), + be64_to_cpua(d7, e4, ef, 52, 66, d3, 5b, 9d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha224) */ + "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" + "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" + "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" + "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0" + "\xd4", + .key_len = 65, + .m = + "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41" + "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(7d, 25, d8, 25, f5, 81, d2, 1e), + be64_to_cpua(34, 62, 79, cb, 6a, 91, 67, 2e), + be64_to_cpua(ae, ce, 77, 59, 1a, db, 59, d5), + be64_to_cpua(20, 43, fa, c0, 9f, 9d, 7b, e7), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(ce, d5, 2e, 8b, de, 5a, 04, 0e), + be64_to_cpua(bf, 50, 05, 58, 39, 0e, 26, 92), + be64_to_cpua(76, 20, 4a, 77, 22, ec, c8, 66), + be64_to_cpua(5f, f8, 74, f8, 57, d0, 5e, 54), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha256) */ + "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" + "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" + "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" + "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" + "\xb8", + .key_len = 65, + .m = + "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" + "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", 
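Each signature half in these vectors occupies ECC_MAX_BYTES, with the 64-bit words running from least to most significant; that is why the short P-192 and P-256 values are followed by runs of all-zero words, and why on a little-endian CPU the whole buffer is simply the little-endian encoding of the zero-extended integer. A hypothetical helper (not part of the patch) mapping a big-endian byte string onto that little-endian layout:

	#include <stddef.h>
	#include <string.h>

	static void pack_be_to_cpua(unsigned char *dst, size_t dst_len,
				    const unsigned char *be, size_t be_len)
	{
		memset(dst, 0, dst_len);		/* high words stay zero */
		for (size_t i = 0; i < be_len && i < dst_len; i++)
			dst[i] = be[be_len - 1 - i];	/* byte-reverse into place */
	}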
+ .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(91, dc, 02, 67, dc, 0c, d0, 82), + be64_to_cpua(ac, 44, c3, e8, 24, 11, 2d, a4), + be64_to_cpua(09, dc, 29, 63, a8, 1a, ad, fc), + be64_to_cpua(08, 31, fa, 74, 0d, 1d, 21, 5d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(4f, 2a, 65, 35, 23, e3, 1d, fa), + be64_to_cpua(0a, 6e, 1b, c4, af, e1, 83, c3), + be64_to_cpua(f9, a9, 81, ac, 4a, 50, d0, 91), + be64_to_cpua(bd, ff, ce, ee, 42, c3, 97, ff), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha384) */ + "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d" + "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e" + "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00" + "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2" + "\x7c", + .key_len = 65, + .m = + "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6" + "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94" + "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(f2, e4, 6c, c7, 94, b1, d5, fe), + be64_to_cpua(08, b2, 6b, 24, 94, 48, 46, 5e), + be64_to_cpua(d0, 2e, 95, 54, d1, 95, 64, 93), + be64_to_cpua(8e, f3, 6f, dc, f8, 69, a6, 2e), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(c0, 60, 11, 92, dc, 17, 89, 12), + be64_to_cpua(69, f4, 3b, 4f, 47, cf, 9b, 16), + be64_to_cpua(19, fb, 5f, 92, f4, c9, 23, 37), + be64_to_cpua(eb, a7, 80, 26, dc, f9, 3a, 44), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha512) */ + "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a" + "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38" + "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b" + "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92" + "\xbf", + .key_len = 65, + .m = + "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9" + "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca" + "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf" + "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(76, f6, 04, 99, 09, 37, 4d, fa), + be64_to_cpua(ed, 8c, 73, 30, 6c, 22, b3, 97), + be64_to_cpua(40, ea, 44, 81, 00, 4e, 29, 08), + be64_to_cpua(b8, 6d, 87, 81, 43, df, fb, 9f), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 
00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(76, 31, 79, 4a, e9, 81, 6a, ee), + be64_to_cpua(5c, ad, c3, 78, 1c, c2, c1, 19), + be64_to_cpua(f8, 00, dd, ab, d4, c0, 2b, e6), + be64_to_cpua(1e, b9, 75, 31, f6, 04, a5, 4d), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +static const struct sig_testvec ecdsa_nist_p384_tv_template[] = { + { + .key = /* secp384r1(sha1) */ + "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" + "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" + "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" + "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" + "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" + "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" + "\xf1", + .key_len = 97, + .m = + "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" + "\x3a\x69\xc1\x93", + .m_size = 20, + .c = (const unsigned char[]){ + be64_to_cpua(ec, 7c, 7e, d0, 87, d7, d7, 6e), + be64_to_cpua(78, f1, 4c, 26, e6, 5b, 86, cf), + be64_to_cpua(3a, c6, f1, 32, 3c, ce, 70, 2b), + be64_to_cpua(8d, 26, 8e, ae, 63, 3f, bc, 20), + be64_to_cpua(57, 55, 07, 20, 43, 30, de, a0), + be64_to_cpua(f5, 0f, 24, 4c, 07, 93, 6f, 21), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(79, 12, 2a, b7, c5, 15, 92, c5), + be64_to_cpua(4a, a1, 59, f1, 1c, a4, 58, 26), + be64_to_cpua(74, a0, 0f, bf, af, c3, 36, 76), + be64_to_cpua(df, 28, 8c, 1b, fa, f9, 95, 88), + be64_to_cpua(5f, 63, b1, be, 5e, 4c, 0e, a1), + be64_to_cpua(cd, bb, 7e, 81, 5d, 8f, 63, c0), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha224) */ + "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" + "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" + "\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05" + "\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc" + "\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50" + "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d" + "\xe0", + .key_len = 97, + .m = + "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd" + "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(3f, dd, 15, 1b, 68, 2b, 9d, 8b), + be64_to_cpua(c9, 9c, 11, b8, 10, 01, c5, 41), + be64_to_cpua(c5, da, b4, e3, 93, 07, e0, 99), + be64_to_cpua(97, f1, c8, 72, 26, cf, 5a, 5e), + be64_to_cpua(ec, cb, e4, 89, 47, b2, f7, bc), + be64_to_cpua(8a, 51, 84, ce, 13, 1e, d2, dc), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(88, 2b, 82, 26, 5e, 1c, da, fb), + be64_to_cpua(9f, 19, d0, 42, 8b, 93, c2, 11), + be64_to_cpua(4d, d0, c6, 6e, b0, e9, fc, 14), + be64_to_cpua(df, d8, 68, a2, 64, 42, 65, f3), + be64_to_cpua(4b, 00, 08, 31, 6c, f5, d5, f6), + be64_to_cpua(8b, 03, 2c, fc, 1f, d1, a9, a4), + 
be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha256) */ + "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a" + "\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a" + "\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e" + "\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4" + "\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52" + "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc" + "\xab", + .key_len = 97, + .m = + "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3" + "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(c8, 8d, 2c, 79, 3a, 8e, 32, c4), + be64_to_cpua(b6, c6, fc, 70, 2e, 66, 3c, 77), + be64_to_cpua(af, 06, 3f, 84, 04, e2, f9, 67), + be64_to_cpua(cc, 47, 53, 87, bc, bd, 83, 3f), + be64_to_cpua(8e, 3f, 7e, ce, 0a, 9b, aa, 59), + be64_to_cpua(08, 09, 12, 9d, 6e, 96, 64, a6), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(10, 0e, f4, 1f, 39, ca, 4d, 43), + be64_to_cpua(4f, 8d, de, 1e, 93, 8d, 95, bb), + be64_to_cpua(15, 68, c0, 75, 3e, 23, 5e, 36), + be64_to_cpua(dd, ce, bc, b2, 97, f4, 9c, f3), + be64_to_cpua(26, a2, b0, 89, 42, 0a, da, d9), + be64_to_cpua(40, 34, b8, 90, a9, 80, ab, 47), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha384) */ + "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f" + "\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9" + "\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c" + "\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a" + "\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9" + "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89" + "\x9e", + .key_len = 97, + .m = + "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf" + "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1" + "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(a2, a4, c8, f2, ea, 9d, 11, 1f), + be64_to_cpua(3b, 1f, 07, 8f, 15, 02, fe, 1d), + be64_to_cpua(29, e6, fb, ca, 8c, d6, b6, b4), + be64_to_cpua(2d, 7a, 91, 5f, 49, 2d, 22, 08), + be64_to_cpua(ee, 2e, 62, 35, 46, fa, 00, d8), + be64_to_cpua(9b, 28, 68, c0, a1, ea, 8c, 50), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(ab, 8d, 4e, de, e6, 6d, 9b, 66), + be64_to_cpua(96, 17, 04, c9, 05, 77, f1, 8e), + be64_to_cpua(44, 92, 8c, 86, 99, 65, b3, 97), + be64_to_cpua(71, cd, 8f, 18, 99, f0, 0f, 13), + be64_to_cpua(bf, e3, 75, 24, 49, ac, fb, c8), + be64_to_cpua(fc, 50, f6, 43, bd, 50, 82, 0e), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, { + .key = /* secp384r1(sha512) */ + 
"\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46" + "\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4" + "\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95" + "\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe" + "\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa" + "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac" + "\xa3", + .key_len = 97, + .m = + "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1" + "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71" + "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc" + "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(3e, b3, c7, a8, b3, 17, 77, d1), + be64_to_cpua(dc, 2b, 43, 0e, 6a, b3, 53, 6f), + be64_to_cpua(4c, fc, 6f, 80, e3, af, b3, d9), + be64_to_cpua(9a, 02, de, 93, e8, 83, e4, 84), + be64_to_cpua(4d, c6, ef, da, 02, e7, 0f, 52), + be64_to_cpua(00, 1d, 20, 94, 77, fe, 31, fa), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(4e, 45, cf, 3c, 93, ff, 50, 5d), + be64_to_cpua(34, e4, 8b, 80, a5, b6, da, 2c), + be64_to_cpua(c4, 6a, 03, 5f, 8d, 7a, f9, fb), + be64_to_cpua(ec, 63, e3, 0c, ec, 50, dc, cc), + be64_to_cpua(de, 3a, 3d, 16, af, b4, 52, 6a), + be64_to_cpua(63, f6, f0, 3d, 5f, 5f, 99, 3f), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +static const struct sig_testvec ecdsa_nist_p521_tv_template[] = { + { + .key = /* secp521r1(sha224) */ + "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0" + "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7" + "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61" + "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb" + "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a" + "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76" + "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47" + "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f" + "\x3b\x83\x82\x2f\x14", + .key_len = 133, + .m = + "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4" + "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb", + .m_size = 28, + .c = (const unsigned char[]){ + be64_to_cpua(46, 6b, c7, af, 7a, b9, 19, 0a), + be64_to_cpua(6c, a6, 9b, 89, 8b, 1e, fd, 09), + be64_to_cpua(98, 85, 29, 88, ff, 0b, 94, 94), + be64_to_cpua(18, c6, 37, 8a, cb, a7, d8, 7d), + be64_to_cpua(f8, 3f, 59, 0f, 74, f0, 3f, d8), + be64_to_cpua(e2, ef, 07, 92, ee, 60, 94, 06), + be64_to_cpua(35, f6, dc, 6d, 02, 7b, 22, ac), + be64_to_cpua(d6, 43, e7, ff, 42, b2, ba, 74), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 01), + be64_to_cpua(50, b1, a5, 98, 92, 2a, a5, 52), + be64_to_cpua(1c, ad, 22, da, 82, 00, 35, a3), + be64_to_cpua(0e, 64, cc, c4, e8, 43, d9, 0e), + be64_to_cpua(30, 90, 0f, 1c, 8f, 78, d3, 9f), + be64_to_cpua(26, 0b, 5f, 49, 32, 6b, 91, 99), + be64_to_cpua(0f, f8, 65, 97, 6b, 09, 4d, 22), + be64_to_cpua(5e, f9, 88, f3, d2, 32, 90, 57), + be64_to_cpua(26, 0d, 55, cd, 23, 1e, 7d, a0), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 3a) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha256) */ + 
"\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae" + "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14" + "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36" + "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81" + "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c" + "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09" + "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05" + "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3" + "\xe6\x77\x1e\x1f\x8a", + .key_len = 133, + .m = + "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39" + "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73", + .m_size = 32, + .c = (const unsigned char[]){ + be64_to_cpua(de, 7e, d7, 59, 10, e9, d9, d5), + be64_to_cpua(38, 1f, 46, 0b, 04, 64, 34, 79), + be64_to_cpua(ae, ce, 54, 76, 9a, c2, 8f, b8), + be64_to_cpua(95, 35, 6f, 02, 0e, af, e1, 4c), + be64_to_cpua(56, 3c, f6, f0, d8, e1, b7, 5d), + be64_to_cpua(50, 9f, 7d, 1f, ca, 8b, a8, 2d), + be64_to_cpua(06, 0f, fd, 83, fc, 0e, d9, ce), + be64_to_cpua(a5, 5f, 57, 52, 27, 78, 3a, b5), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, cd), + be64_to_cpua(55, 38, b6, f6, 34, 65, c7, bd), + be64_to_cpua(1c, 57, 56, 8f, 12, b7, 1d, 91), + be64_to_cpua(03, 42, 02, 5f, 50, f0, a2, 0d), + be64_to_cpua(fa, 10, dd, 9b, fb, 36, 1a, 31), + be64_to_cpua(e0, 87, 2c, 44, 4b, 5a, ee, af), + be64_to_cpua(a9, 79, 24, b9, 37, 35, dd, a0), + be64_to_cpua(6b, 35, ae, 65, b5, 99, 12, 0a), + be64_to_cpua(50, 85, 38, f9, 15, 83, 18, 04), + be64_to_cpua(00, 00, 00, 00, 00, 00, 01, cf) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha384) */ + "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b" + "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33" + "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4" + "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c" + "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47" + "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14" + "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46" + "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39" + "\xb3\x3b\x5b\x1b\x94", + .key_len = 133, + .m = + "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26" + "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59" + "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76", + .m_size = 48, + .c = (const unsigned char[]){ + be64_to_cpua(b8, 6a, dd, fb, e6, 63, 4e, 28), + be64_to_cpua(84, 59, fd, 1a, c4, 40, dd, 43), + be64_to_cpua(32, 76, 06, d0, f9, c0, e4, e6), + be64_to_cpua(e4, df, 9b, 7d, 9e, 47, ca, 33), + be64_to_cpua(7e, 42, 71, 86, 57, 2d, f1, 7d), + be64_to_cpua(f2, 4b, 64, 98, f7, ec, da, c7), + be64_to_cpua(ec, 51, dc, e8, 35, 5e, ae, 16), + be64_to_cpua(96, 76, 3c, 27, ea, aa, 9c, 26), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 93), + be64_to_cpua(c6, 4f, ab, 2b, 62, c1, 42, b1), + be64_to_cpua(e5, 5a, 94, 56, cf, 8f, b4, 22), + be64_to_cpua(6a, c3, f3, 7a, d1, fa, e7, a7), + be64_to_cpua(df, c4, c0, db, 54, db, 8a, 0d), + be64_to_cpua(da, a7, cd, 26, 28, 76, 3b, 52), + be64_to_cpua(e4, 3c, bc, 93, 65, 57, 1c, 30), + be64_to_cpua(55, ce, 37, 97, c9, 05, 51, e5), + be64_to_cpua(c3, 6a, 87, 6e, b5, 13, 1f, 20), + be64_to_cpua(00, 00, 00, 00, 00, 00, 00, ff) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, + { + .key = 
/* secp521r1(sha512) */ + "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f" + "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a" + "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f" + "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee" + "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e" + "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6" + "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77" + "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae" + "\xd0\x17\xdf\x49\x6a", + .key_len = 133, + .m = + "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69" + "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed" + "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3" + "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68", + .m_size = 64, + .c = (const unsigned char[]){ + be64_to_cpua(28, b5, 04, b0, b6, 33, 1c, 7e), + be64_to_cpua(80, a6, 13, fc, b6, 90, f7, bb), + be64_to_cpua(27, 93, e8, 6c, 49, 7d, 28, fc), + be64_to_cpua(1f, 12, 3e, b7, 7e, 51, ff, 7f), + be64_to_cpua(fb, 62, 1e, 42, 03, 6c, 74, 8a), + be64_to_cpua(63, 0e, 02, cc, 94, a9, 05, b9), + be64_to_cpua(aa, 86, ec, a8, 05, 03, 52, 56), + be64_to_cpua(71, 86, 96, ac, 21, 33, 7e, 4e), + be64_to_cpua(00, 00, 00, 00, 00, 00, 01, 5c), + be64_to_cpua(46, 1e, 77, 44, 78, e0, d1, 04), + be64_to_cpua(72, 74, 13, 63, 39, a6, e5, 25), + be64_to_cpua(00, 55, bb, 6a, b4, 73, 00, d2), + be64_to_cpua(71, d0, e9, ca, a7, c0, cb, aa), + be64_to_cpua(7a, 76, 37, 51, 47, 49, 98, 12), + be64_to_cpua(88, 05, 3e, 43, 39, 01, bd, b7), + be64_to_cpua(95, 35, 89, 4f, 41, 5f, 9e, 19), + be64_to_cpua(43, 52, 1d, e3, c6, bd, 5a, 40), + be64_to_cpua(00, 00, 00, 00, 00, 00, 01, 70) }, + .c_size = ECC_MAX_BYTES * 2, + .public_key_vec = true, + }, +}; + +/* + * ECDSA X9.62 test vectors. + * + * Identical to ECDSA test vectors, except signature in "c" is X9.62 encoded. 
+ */ +static const struct sig_testvec x962_ecdsa_nist_p192_tv_template[] = { + { + .key = /* secp192r1(sha1) */ + "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" + "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" + "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" + "\x98", + .key_len = 49, + .m = + "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" + "\x63\x85\xe7\x82", + .m_size = 20, + .c = + "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91" + "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10" + "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86" + "\x80\x6f\xa5\x79\x77\xda\xd0", + .c_size = 55, + .public_key_vec = true, + }, { + .key = /* secp192r1(sha224) */ "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" "\xa3", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40" "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11", .m_size = 28, - .algo = OID_id_ecdsa_with_sha224, .c = "\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b" "\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14" @@ -674,23 +1386,17 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x5c\x99\xdb\x92\x5b\x36", .c_size = 54, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp192r1(sha256) */ "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae" "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56" "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58" "\x91", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd" "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7", .m_size = 32, - .algo = OID_id_ecdsa_with_sha256, .c = "\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56" "\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3" @@ -698,24 +1404,18 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x3a\x97\xd9\xcd\x1a\x6a\x49", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp192r1(sha384) */ "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7" "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f" "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e" "\x8b", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9" "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61" "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78", .m_size = 48, - .algo = OID_id_ecdsa_with_sha384, .c = "\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34" "\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64" @@ -723,25 +1423,19 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x12\x3b\x3b\x28\xfb\x6d\xe1", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, { 
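/*
 * Illustrative sketch, not part of this patch: per the comment above, the
 * X9.62 vectors wrap the signature as a DER ECDSA-Sig-Value -- a SEQUENCE
 * of two INTEGERs (r, s) -- while the P1363 variants further down are the
 * bare fixed-width big-endian concatenation r || s with no framing at all.
 * A minimal shape check for the short-form-length case, which covers every
 * vector here except the P521 ones (their SEQUENCE length takes one
 * long-form byte, 0x81); x962_sig_shape_ok() is a made-up name:
 */
static bool x962_sig_shape_ok(const u8 *c, unsigned int clen)
{
	unsigned int i = 2, n;

	if (clen < 2 || c[0] != 0x30 || c[1] != clen - 2)
		return false;			/* SEQUENCE header */
	for (n = 0; n < 2; n++) {		/* r, then s */
		if (i + 2 > clen || c[i] != 0x02)
			return false;		/* INTEGER tag */
		i += 2 + c[i + 1];		/* tag + length + value */
	}
	return i == clen;
}
/*
 * E.g. the first vector above: 30 35 = SEQUENCE(53), 02 19 = INTEGER(25)
 * for r (the leading 00 keeps it positive since \xba has its top bit set),
 * 02 18 = INTEGER(24) for s; 2 + 27 + 26 = 55 = .c_size.
 */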
- .key = + .key = /* secp192r1(sha512) */ "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea" "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d" "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11" "\x57", .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, .m = "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23" "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67" "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70" "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5", .m_size = 64, - .algo = OID_id_ecdsa_with_sha512, .c = "\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07" "\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73" @@ -749,28 +1443,42 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { "\x6a\xdf\x97\xfd\x82\x76\x24", .c_size = 55, .public_key_vec = true, - .siggen_sigver_test = true, }, }; -static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { +static const struct sig_testvec x962_ecdsa_nist_p256_tv_template[] = { { - .key = + .key = /* secp256r1(sha1) */ + "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" + "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" + "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" + "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" + "\xaf", + .key_len = 65, + .m = + "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" + "\x0b\xde\x6a\x42", + .m_size = 20, + .c = + "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7" + "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a" + "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d" + "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7" + "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad", + .c_size = 72, + .public_key_vec = true, + }, { + .key = /* secp256r1(sha224) */ "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0" "\xd4", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41" "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0", .m_size = 28, - .algo = OID_id_ecdsa_with_sha224, .c = "\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59" "\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25" @@ -779,24 +1487,18 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\x2e\x8b\xde\x5a\x04\x0e", .c_size = 70, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha256) */ "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" "\xb8", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" 
"\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", .m_size = 32, - .algo = OID_id_ecdsa_with_sha256, .c = "\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63" "\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67" @@ -805,25 +1507,19 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\x2a\x65\x35\x23\xe3\x1d\xfa", .c_size = 71, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha384) */ "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d" "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e" "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00" "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2" "\x7c", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6" "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94" "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb", .m_size = 48, - .algo = OID_id_ecdsa_with_sha384, .c = "\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95" "\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c" @@ -832,26 +1528,20 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\xc0\x60\x11\x92\xdc\x17\x89\x12", .c_size = 72, .public_key_vec = true, - .siggen_sigver_test = true, }, { - .key = + .key = /* secp256r1(sha512) */ "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a" "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38" "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b" "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92" "\xbf", .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, .m = "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9" "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca" "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf" "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6", .m_size = 64, - .algo = OID_id_ecdsa_with_sha512, .c = "\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44" "\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04" @@ -860,12 +1550,35 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { "\x31\x79\x4a\xe9\x81\x6a\xee", .c_size = 71, .public_key_vec = true, - .siggen_sigver_test = true, }, }; -static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { +static const struct sig_testvec x962_ecdsa_nist_p384_tv_template[] = { { + .key = /* secp384r1(sha1) */ + "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" + "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" + "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" + "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" + "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" + "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" + "\xf1", + .key_len = 97, + .m = + "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" + "\x3a\x69\xc1\x93", + .m_size = 20, + .c = + "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07" + "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1" + 
"\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e" + "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0" + "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88" + "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26" + "\x79\x12\x2a\xb7\xc5\x15\x92\xc5", + .c_size = 104, + .public_key_vec = true, + }, { .key = /* secp384r1(sha224) */ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" @@ -875,15 +1588,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d" "\xe0", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd" "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5", .m_size = 28, - .algo = OID_id_ecdsa_with_sha224, .c = "\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4" "\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4" @@ -894,7 +1602,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x88\x2b\x82\x26\x5e\x1c\xda\xfb", .c_size = 104, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha256) */ "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a" @@ -905,15 +1612,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc" "\xab", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3" "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2", .m_size = 32, - .algo = OID_id_ecdsa_with_sha256, .c = "\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce" "\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84" @@ -924,7 +1626,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\xf4\x1f\x39\xca\x4d\x43", .c_size = 102, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha384) */ "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f" @@ -935,16 +1636,11 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89" "\x9e", .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf" "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1" "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff", .m_size = 48, - .algo = OID_id_ecdsa_with_sha384, .c = "\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62" "\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb" @@ -955,7 +1651,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\xab\x8d\x4e\xde\xe6\x6d\x9b\x66", .c_size = 104, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = /* secp384r1(sha512) */ "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46" @@ -966,17 +1661,12 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac" "\xa3", .key_len = 97, - .params = - 
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, .m = "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1" "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71" "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc" "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e", .m_size = 64, - .algo = OID_id_ecdsa_with_sha512, .c = "\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02" "\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3" @@ -987,14 +1677,163 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { "\x3c\x93\xff\x50\x5d", .c_size = 101, .public_key_vec = true, - .siggen_sigver_test = true, + }, +}; + +static const struct sig_testvec x962_ecdsa_nist_p521_tv_template[] = { + { + .key = /* secp521r1(sha224) */ + "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0" + "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7" + "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61" + "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb" + "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a" + "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76" + "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47" + "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f" + "\x3b\x83\x82\x2f\x14", + .key_len = 133, + .m = + "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4" + "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb", + .m_size = 28, + .c = + "\x30\x81\x86\x02\x41\x01\xd6\x43\xe7\xff\x42\xb2\xba\x74\x35\xf6" + "\xdc\x6d\x02\x7b\x22\xac\xe2\xef\x07\x92\xee\x60\x94\x06\xf8\x3f" + "\x59\x0f\x74\xf0\x3f\xd8\x18\xc6\x37\x8a\xcb\xa7\xd8\x7d\x98\x85" + "\x29\x88\xff\x0b\x94\x94\x6c\xa6\x9b\x89\x8b\x1e\xfd\x09\x46\x6b" + "\xc7\xaf\x7a\xb9\x19\x0a\x02\x41\x3a\x26\x0d\x55\xcd\x23\x1e\x7d" + "\xa0\x5e\xf9\x88\xf3\xd2\x32\x90\x57\x0f\xf8\x65\x97\x6b\x09\x4d" + "\x22\x26\x0b\x5f\x49\x32\x6b\x91\x99\x30\x90\x0f\x1c\x8f\x78\xd3" + "\x9f\x0e\x64\xcc\xc4\xe8\x43\xd9\x0e\x1c\xad\x22\xda\x82\x00\x35" + "\xa3\x50\xb1\xa5\x98\x92\x2a\xa5\x52", + .c_size = 137, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha256) */ + "\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae" + "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14" + "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36" + "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81" + "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c" + "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09" + "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05" + "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3" + "\xe6\x77\x1e\x1f\x8a", + .key_len = 133, + .m = + "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39" + "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73", + .m_size = 32, + .c = + "\x30\x81\x88\x02\x42\x00\xcd\xa5\x5f\x57\x52\x27\x78\x3a\xb5\x06" + "\x0f\xfd\x83\xfc\x0e\xd9\xce\x50\x9f\x7d\x1f\xca\x8b\xa8\x2d\x56" + "\x3c\xf6\xf0\xd8\xe1\xb7\x5d\x95\x35\x6f\x02\x0e\xaf\xe1\x4c\xae" + "\xce\x54\x76\x9a\xc2\x8f\xb8\x38\x1f\x46\x0b\x04\x64\x34\x79\xde" + "\x7e\xd7\x59\x10\xe9\xd9\xd5\x02\x42\x01\xcf\x50\x85\x38\xf9\x15" + "\x83\x18\x04\x6b\x35\xae\x65\xb5\x99\x12\x0a\xa9\x79\x24\xb9\x37" + "\x35\xdd\xa0\xe0\x87\x2c\x44\x4b\x5a\xee\xaf\xfa\x10\xdd\x9b\xfb" + 
"\x36\x1a\x31\x03\x42\x02\x5f\x50\xf0\xa2\x0d\x1c\x57\x56\x8f\x12" + "\xb7\x1d\x91\x55\x38\xb6\xf6\x34\x65\xc7\xbd", + .c_size = 139, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha384) */ + "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b" + "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33" + "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4" + "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c" + "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47" + "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14" + "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46" + "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39" + "\xb3\x3b\x5b\x1b\x94", + .key_len = 133, + .m = + "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26" + "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59" + "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76", + .m_size = 48, + .c = + "\x30\x81\x88\x02\x42\x00\x93\x96\x76\x3c\x27\xea\xaa\x9c\x26\xec" + "\x51\xdc\xe8\x35\x5e\xae\x16\xf2\x4b\x64\x98\xf7\xec\xda\xc7\x7e" + "\x42\x71\x86\x57\x2d\xf1\x7d\xe4\xdf\x9b\x7d\x9e\x47\xca\x33\x32" + "\x76\x06\xd0\xf9\xc0\xe4\xe6\x84\x59\xfd\x1a\xc4\x40\xdd\x43\xb8" + "\x6a\xdd\xfb\xe6\x63\x4e\x28\x02\x42\x00\xff\xc3\x6a\x87\x6e\xb5" + "\x13\x1f\x20\x55\xce\x37\x97\xc9\x05\x51\xe5\xe4\x3c\xbc\x93\x65" + "\x57\x1c\x30\xda\xa7\xcd\x26\x28\x76\x3b\x52\xdf\xc4\xc0\xdb\x54" + "\xdb\x8a\x0d\x6a\xc3\xf3\x7a\xd1\xfa\xe7\xa7\xe5\x5a\x94\x56\xcf" + "\x8f\xb4\x22\xc6\x4f\xab\x2b\x62\xc1\x42\xb1", + .c_size = 139, + .public_key_vec = true, + }, + { + .key = /* secp521r1(sha512) */ + "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f" + "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a" + "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f" + "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee" + "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e" + "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6" + "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77" + "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae" + "\xd0\x17\xdf\x49\x6a", + .key_len = 133, + .m = + "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69" + "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed" + "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3" + "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68", + .m_size = 64, + .c = + "\x30\x81\x88\x02\x42\x01\x5c\x71\x86\x96\xac\x21\x33\x7e\x4e\xaa" + "\x86\xec\xa8\x05\x03\x52\x56\x63\x0e\x02\xcc\x94\xa9\x05\xb9\xfb" + "\x62\x1e\x42\x03\x6c\x74\x8a\x1f\x12\x3e\xb7\x7e\x51\xff\x7f\x27" + "\x93\xe8\x6c\x49\x7d\x28\xfc\x80\xa6\x13\xfc\xb6\x90\xf7\xbb\x28" + "\xb5\x04\xb0\xb6\x33\x1c\x7e\x02\x42\x01\x70\x43\x52\x1d\xe3\xc6" + "\xbd\x5a\x40\x95\x35\x89\x4f\x41\x5f\x9e\x19\x88\x05\x3e\x43\x39" + "\x01\xbd\xb7\x7a\x76\x37\x51\x47\x49\x98\x12\x71\xd0\xe9\xca\xa7" + "\xc0\xcb\xaa\x00\x55\xbb\x6a\xb4\x73\x00\xd2\x72\x74\x13\x63\x39" + "\xa6\xe5\x25\x46\x1e\x77\x44\x78\xe0\xd1\x04", + .c_size = 139, + .public_key_vec = true, + }, +}; + +/* + * ECDSA P1363 test vectors. + * + * Identical to ECDSA test vectors, except signature in "c" is P1363 encoded. 
+ */ +static const struct sig_testvec p1363_ecdsa_nist_p256_tv_template[] = { + { + .key = /* secp256r1(sha256) */ + "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" + "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" + "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" + "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" + "\xb8", + .key_len = 65, + .m = + "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" + "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", + .m_size = 32, + .c = + "\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63\xa8\x1a\xad\xfc" + "\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67\xdc\x0c\xd0\x82" + "\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9\xa9\x81\xac\x4a\x50\xd0\x91" + "\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f\x2a\x65\x35\x23\xe3\x1d\xfa", + .c_size = 64, + .public_key_vec = true, }, }; /* * EC-RDSA test vectors are generated by gost-engine. */ -static const struct akcipher_testvec ecrdsa_tv_template[] = { +static const struct sig_testvec ecrdsa_tv_template[] = { { .key = "\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05" @@ -1019,7 +1858,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9", .m_size = 32, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1045,7 +1883,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40", .m_size = 32, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1071,7 +1908,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39", .m_size = 32, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1106,7 +1942,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f", .m_size = 64, .public_key_vec = true, - .siggen_sigver_test = true, }, { .key = @@ -1141,14 +1976,68 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { "\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc", .m_size = 64, .public_key_vec = true, - .siggen_sigver_test = true, + }, +}; + +/* + * PKCS#1 RSA test vectors for hash algorithm "none" + * (i.e. 
the hash in "m" is not prepended by a Full Hash Prefix) + * + * Obtained from: + * https://vcsjones.dev/sometimes-valid-rsa-dotnet/ + * https://gist.github.com/vcsjones/ab4c2327b53ed018eada76b75ef4fd99 + */ +static const struct sig_testvec pkcs1_rsa_none_tv_template[] = { + { + .key = + "\x30\x82\x01\x0a\x02\x82\x01\x01\x00\xa2\x63\x0b\x39\x44\xb8\xbb" + "\x23\xa7\x44\x49\xbb\x0e\xff\xa1\xf0\x61\x0a\x53\x93\xb0\x98\xdb" + "\xad\x2c\x0f\x4a\xc5\x6e\xff\x86\x3c\x53\x55\x0f\x15\xce\x04\x3f" + "\x2b\xfd\xa9\x96\x96\xd9\xbe\x61\x79\x0b\x5b\xc9\x4c\x86\x76\xe5" + "\xe0\x43\x4b\x22\x95\xee\xc2\x2b\x43\xc1\x9f\xd8\x68\xb4\x8e\x40" + "\x4f\xee\x85\x38\xb9\x11\xc5\x23\xf2\x64\x58\xf0\x15\x32\x6f\x4e" + "\x57\xa1\xae\x88\xa4\x02\xd7\x2a\x1e\xcd\x4b\xe1\xdd\x63\xd5\x17" + "\x89\x32\x5b\xb0\x5e\x99\x5a\xa8\x9d\x28\x50\x0e\x17\xee\x96\xdb" + "\x61\x3b\x45\x51\x1d\xcf\x12\x56\x0b\x92\x47\xfc\xab\xae\xf6\x66" + "\x3d\x47\xac\x70\x72\xe7\x92\xe7\x5f\xcd\x10\xb9\xc4\x83\x64\x94" + "\x19\xbd\x25\x80\xe1\xe8\xd2\x22\xa5\xd0\xba\x02\x7a\xa1\x77\x93" + "\x5b\x65\xc3\xee\x17\x74\xbc\x41\x86\x2a\xdc\x08\x4c\x8c\x92\x8c" + "\x91\x2d\x9e\x77\x44\x1f\x68\xd6\xa8\x74\x77\xdb\x0e\x5b\x32\x8b" + "\x56\x8b\x33\xbd\xd9\x63\xc8\x49\x9d\x3a\xc5\xc5\xea\x33\x0b\xd2" + "\xf1\xa3\x1b\xf4\x8b\xbe\xd9\xb3\x57\x8b\x3b\xde\x04\xa7\x7a\x22" + "\xb2\x24\xae\x2e\xc7\x70\xc5\xbe\x4e\x83\x26\x08\xfb\x0b\xbd\xa9" + "\x4f\x99\x08\xe1\x10\x28\x72\xaa\xcd\x02\x03\x01\x00\x01", + .key_len = 270, + .m = + "\x68\xb4\xf9\x26\x34\x31\x25\xdd\x26\x50\x13\x68\xc1\x99\x26\x71" + "\x19\xa2\xde\x81", + .m_size = 20, + .c = + "\x6a\xdb\x39\xe5\x63\xb3\x25\xde\x58\xca\xc3\xf1\x36\x9c\x0b\x36" + "\xb7\xd6\x69\xf9\xba\xa6\x68\x14\x8c\x24\x52\xd3\x25\xa5\xf3\xad" + "\xc9\x47\x44\xde\x06\xd8\x0f\x56\xca\x2d\xfb\x0f\xe9\x99\xe2\x9d" + "\x8a\xe8\x7f\xfb\x9a\x99\x96\xf1\x2c\x4a\xe4\xc0\xae\x4d\x29\x47" + "\x38\x96\x51\x2f\x6d\x8e\xb8\x88\xbd\x1a\x0a\x70\xbc\x23\x38\x67" + "\x62\x22\x01\x23\x71\xe5\xbb\x95\xea\x6b\x8d\x31\x62\xbf\xf0\xc4" + "\xb9\x46\xd6\x67\xfc\x4c\xe6\x1f\xd6\x5d\xf7\xa9\xad\x3a\xf1\xbf" + "\xa2\xf9\x66\xde\xb6\x8e\xec\x8f\x81\x8d\x1e\x3a\x12\x27\x6a\xfc" + "\xae\x92\x9f\xc3\x87\xc3\xba\x8d\x04\xb8\x8f\x0f\x61\x68\x9a\x96" + "\x2c\x80\x2c\x32\x40\xde\x9d\xb9\x9b\xe2\xe4\x45\x2e\x91\x47\x5c" + "\x47\xa4\x9d\x02\x57\x59\xf7\x75\x5d\x5f\x32\x82\x75\x5d\xe5\x78" + "\xc9\x19\x61\x46\x06\x9d\xa5\x1d\xd6\x32\x48\x9a\xdb\x09\x29\x81" + "\x14\x2e\xf0\x27\xe9\x37\x13\x74\xec\xa5\xcd\x67\x6b\x19\xf6\x88" + "\xf0\xc2\x8b\xa8\x7f\x2f\x76\x5a\x3e\x0c\x47\x5d\xe8\x82\x50\x27" + "\x40\xce\x27\x41\x45\xa0\xcf\xaa\x2f\xd3\xad\x3c\xbf\x73\xff\x93" + "\xe3\x78\x49\xd9\xa9\x78\x22\x81\x9a\xe5\xe2\x94\xe9\x40\xab\xf1", + .c_size = 256, + .public_key_vec = true, }, }; /* * PKCS#1 RSA test vectors. Obtained from CAVS testing. */ -static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { +static const struct sig_testvec pkcs1_rsa_tv_template[] = { { .key = "\x30\x82\x04\xa5\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" @@ -1260,7 +2149,6 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { "\xda\x62\x8d\xe1\x2a\x71\x91\x43\x40\x61\x3c\x5a\xbe\x86\xfc\x5b" "\xe6\xf9\xa9\x16\x31\x1f\xaf\x25\x6d\xc2\x4a\x23\x6e\x63\x02\xa2", .c_size = 256, - .siggen_sigver_test = true, } }; @@ -5129,309 +6017,6 @@ static const struct hash_testvec rmd160_tv_template[] = { } }; -static const u8 zeroes[4096] = { [0 ... 4095] = 0 }; -static const u8 ones[4096] = { [0 ... 
4095] = 0xff }; - -static const struct hash_testvec crc64_rocksoft_tv_template[] = { - { - .plaintext = zeroes, - .psize = 4096, - .digest = "\x4e\xb6\x22\xeb\x67\xd3\x82\x64", - }, { - .plaintext = ones, - .psize = 4096, - .digest = "\xac\xa3\xec\x02\x73\xba\xdd\xc0", - } -}; - -static const struct hash_testvec crct10dif_tv_template[] = { - { - .plaintext = "abc", - .psize = 3, - .digest = (u8 *)(u16 []){ 0x443b }, - }, { - .plaintext = "1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 79, - .digest = (u8 *)(u16 []){ 0x4b70 }, - }, { - .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" - "ddddddddddddd", - .psize = 56, - .digest = (u8 *)(u16 []){ 0x9ce3 }, - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 319, - .digest = (u8 *)(u16 []){ 0x44c6 }, - }, { - .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" - "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" - "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a" - "\xa1\x38\xcf\x43\xda\x71\x08\x7c" - "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee" - "\x85\x1c\x90\x27\xbe\x32\xc9\x60" - "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2" - "\x46\xdd\x74\x0b\x7f\x16\xad\x21" - "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93" - "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05" - "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77" - "\x0e\x82\x19\xb0\x24\xbb\x52\xe9" - "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38" - "\xcf\x66\xfd\x71\x08\x9f\x13\xaa" - "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c" - "\xb3\x27\xbe\x55\xec\x60\xf7\x8e" - "\x02\x99\x30\xc7\x3b\xd2\x69\x00" - "\x74\x0b\xa2\x16\xad\x44\xdb\x4f" - "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1" - "\x58\xef\x63\xfa\x91\x05\x9c\x33" - "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5" - "\x19\xb0\x47\xde\x52\xe9\x80\x17" - "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66" - "\xfd\x94\x08\x9f\x36\xcd\x41\xd8" - "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a" - "\xe1\x55\xec\x83\x1a\x8e\x25\xbc" - "\x30\xc7\x5e\xf5\x69\x00\x97\x0b" - "\xa2\x39\xd0\x44\xdb\x72\x09\x7d" - "\x14\xab\x1f\xb6\x4d\xe4\x58\xef" - "\x86\x1d\x91\x28\xbf\x33\xca\x61" - "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3" - "\x47\xde\x75\x0c\x80\x17\xae\x22" - "\xb9\x50\xe7\x5b\xf2\x89\x20\x94" - "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06" - "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78" - "\x0f\x83\x1a\xb1\x25\xbc\x53\xea" - "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39" - "\xd0\x67\xfe\x72\x09\xa0\x14\xab" - "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d" - "\xb4\x28\xbf\x56\xed\x61\xf8\x8f" - "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01" - "\x75\x0c\xa3\x17\xae\x45\xdc\x50" - "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2" - "\x59\xf0\x64\xfb\x92\x06\x9d\x34" - "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6" - "\x1a\xb1\x48\xdf\x53\xea\x81\x18" - "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67" - "\xfe\x95\x09\xa0\x37\xce\x42\xd9" - "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b" - "\xe2\x56\xed\x84\x1b\x8f\x26\xbd" - "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c" - "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e" - "\x15\xac\x20\xb7\x4e\xe5\x59\xf0" - "\x87\x1e\x92\x29\xc0\x34\xcb\x62" - "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4" - "\x48\xdf\x76\x0d\x81\x18\xaf\x23" - "\xba\x51\xe8\x5c\xf3\x8a\x21\x95" - "\x2c\xc3\x37\xce\x65\xfc\x70\x07" - "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79" - "\x10\x84\x1b\xb2\x26\xbd\x54\xeb" - "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a" - "\xd1\x68\xff\x73\x0a\xa1\x15\xac" - "\x43\xda\x4e\xe5\x7c\x13\x87\x1e" - "\xb5\x29\xc0\x57\xee\x62\xf9\x90" - 
"\x04\x9b\x32\xc9\x3d\xd4\x6b\x02" - "\x76\x0d\xa4\x18\xaf\x46\xdd\x51" - "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3" - "\x5a\xf1\x65\xfc\x93\x07\x9e\x35" - "\xcc\x40\xd7\x6e\x05\x79\x10\xa7" - "\x1b\xb2\x49\xe0\x54\xeb\x82\x19" - "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68" - "\xff\x96\x0a\xa1\x38\xcf\x43\xda" - "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c" - "\xe3\x57\xee\x85\x1c\x90\x27\xbe" - "\x32\xc9\x60\xf7\x6b\x02\x99\x0d" - "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f" - "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1" - "\x88\x1f\x93\x2a\xc1\x35\xcc\x63" - "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5" - "\x49\xe0\x77\x0e\x82\x19\xb0\x24" - "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96" - "\x2d\xc4\x38\xcf\x66\xfd\x71\x08" - "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a" - "\x11\x85\x1c\xb3\x27\xbe\x55\xec" - "\x60\xf7\x8e\x02\x99\x30\xc7\x3b" - "\xd2\x69\x00\x74\x0b\xa2\x16\xad" - "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f" - "\xb6\x2a\xc1\x58\xef\x63\xfa\x91" - "\x05\x9c\x33\xca\x3e\xd5\x6c\x03" - "\x77\x0e\xa5\x19\xb0\x47\xde\x52" - "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4" - "\x5b\xf2\x66\xfd\x94\x08\x9f\x36" - "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8" - "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a" - "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69" - "\x00\x97\x0b\xa2\x39\xd0\x44\xdb" - "\x72\x09\x7d\x14\xab\x1f\xb6\x4d" - "\xe4\x58\xef\x86\x1d\x91\x28\xbf" - "\x33\xca\x61\xf8\x6c\x03\x9a\x0e" - "\xa5\x3c\xd3\x47\xde\x75\x0c\x80" - "\x17\xae\x22\xb9\x50\xe7\x5b\xf2" - "\x89\x20\x94\x2b\xc2\x36\xcd\x64" - "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6" - "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25" - "\xbc\x53\xea\x5e\xf5\x8c\x00\x97" - "\x2e\xc5\x39\xd0\x67\xfe\x72\x09" - "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b" - "\x12\x86\x1d\xb4\x28\xbf\x56\xed" - "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c" - "\xd3\x6a\x01\x75\x0c\xa3\x17\xae" - "\x45\xdc\x50\xe7\x7e\x15\x89\x20" - "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92" - "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04" - "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53" - "\xea\x81\x18\x8c\x23\xba\x2e\xc5" - "\x5c\xf3\x67\xfe\x95\x09\xa0\x37" - "\xce\x42\xd9\x70\x07\x7b\x12\xa9" - "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b" - "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a" - "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc" - "\x73\x0a\x7e\x15\xac\x20\xb7\x4e" - "\xe5\x59\xf0\x87\x1e\x92\x29\xc0" - "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f" - "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81" - "\x18\xaf\x23\xba\x51\xe8\x5c\xf3" - "\x8a\x21\x95\x2c\xc3\x37\xce\x65" - "\xfc\x70\x07\x9e\x12\xa9\x40\xd7" - "\x4b\xe2\x79\x10\x84\x1b\xb2\x26" - "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98" - "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a" - "\xa1\x15\xac\x43\xda\x4e\xe5\x7c" - "\x13\x87\x1e\xb5\x29\xc0\x57\xee" - "\x62\xf9\x90\x04\x9b\x32\xc9\x3d" - "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf" - "\x46\xdd\x51\xe8\x7f\x16\x8a\x21" - "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93" - "\x07\x9e\x35\xcc\x40\xd7\x6e\x05" - "\x79\x10\xa7\x1b\xb2\x49\xe0\x54" - "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6" - "\x5d\xf4\x68\xff\x96\x0a\xa1\x38" - "\xcf\x43\xda\x71\x08\x7c\x13\xaa" - "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c" - "\x90\x27\xbe\x32\xc9\x60\xf7\x6b" - "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd" - "\x74\x0b\x7f\x16\xad\x21\xb8\x4f" - "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1" - "\x35\xcc\x63\xfa\x6e\x05\x9c\x10" - "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82" - "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4" - "\x8b\x22\x96\x2d\xc4\x38\xcf\x66" - "\xfd\x71\x08\x9f\x13\xaa\x41\xd8" - "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27" - "\xbe\x55\xec\x60\xf7\x8e\x02\x99" - "\x30\xc7\x3b\xd2\x69\x00\x74\x0b" - "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d" - "\x14\x88\x1f\xb6\x2a\xc1\x58\xef" - "\x63\xfa\x91\x05\x9c\x33\xca\x3e" - "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0" - "\x47\xde\x52\xe9\x80\x17\x8b\x22" - "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94" - 
"\x08\x9f\x36\xcd\x41\xd8\x6f\x06" - "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55" - "\xec\x83\x1a\x8e\x25\xbc\x30\xc7" - "\x5e\xf5\x69\x00\x97\x0b\xa2\x39" - "\xd0\x44\xdb\x72\x09\x7d\x14\xab" - "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d" - "\x91\x28\xbf\x33\xca\x61\xf8\x6c" - "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde" - "\x75\x0c\x80\x17\xae\x22\xb9\x50" - "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2" - "\x36\xcd\x64\xfb\x6f\x06\x9d\x11" - "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83" - "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5" - "\x8c\x00\x97\x2e\xc5\x39\xd0\x67" - "\xfe\x72\x09\xa0\x14\xab\x42\xd9" - "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28" - "\xbf\x56\xed\x61\xf8\x8f\x03\x9a" - "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c" - "\xa3\x17\xae\x45\xdc\x50\xe7\x7e" - "\x15\x89\x20\xb7\x2b\xc2\x59\xf0" - "\x64\xfb\x92\x06\x9d\x34\xcb\x3f" - "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1" - "\x48\xdf\x53\xea\x81\x18\x8c\x23" - "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95" - "\x09\xa0\x37\xce\x42\xd9\x70\x07" - "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56" - "\xed\x84\x1b\x8f\x26\xbd\x31\xc8" - "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a" - "\xd1\x45\xdc\x73\x0a\x7e\x15\xac" - "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e" - "\x92\x29\xc0\x34\xcb\x62\xf9\x6d" - "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf" - "\x76\x0d\x81\x18\xaf\x23\xba\x51" - "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3" - "\x37\xce\x65\xfc\x70\x07\x9e\x12" - "\xa9\x40\xd7\x4b\xe2\x79\x10\x84" - "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6" - "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68" - "\xff\x73\x0a\xa1\x15\xac\x43\xda" - "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29" - "\xc0\x57\xee\x62\xf9\x90\x04\x9b" - "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d" - "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f" - "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1" - "\x65\xfc\x93\x07\x9e\x35\xcc\x40" - "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2" - "\x49\xe0\x54\xeb\x82\x19\x8d\x24" - "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96" - "\x0a\xa1\x38\xcf\x43\xda\x71\x08" - "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57" - "\xee\x85\x1c\x90\x27\xbe\x32\xc9" - "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b" - "\xd2\x46\xdd\x74\x0b\x7f\x16\xad" - "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f" - "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e" - "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0" - "\x77\x0e\x82\x19\xb0\x24\xbb\x52" - "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4" - "\x38\xcf\x66\xfd\x71\x08\x9f\x13" - "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85" - "\x1c\xb3\x27\xbe\x55\xec\x60\xf7" - "\x8e\x02\x99\x30\xc7\x3b\xd2\x69" - "\x00\x74\x0b\xa2\x16\xad\x44\xdb" - "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a" - "\xc1\x58\xef\x63\xfa\x91\x05\x9c" - "\x33\xca\x3e\xd5\x6c\x03\x77\x0e" - "\xa5\x19\xb0\x47\xde\x52\xe9\x80" - "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2" - "\x66\xfd\x94\x08\x9f\x36\xcd\x41" - "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3" - "\x4a\xe1\x55\xec\x83\x1a\x8e\x25" - "\xbc\x30\xc7\x5e\xf5\x69\x00\x97" - "\x0b\xa2\x39\xd0\x44\xdb\x72\x09" - "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58" - "\xef\x86\x1d\x91\x28\xbf\x33\xca" - "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c" - "\xd3\x47\xde\x75\x0c\x80\x17\xae" - "\x22\xb9\x50\xe7\x5b\xf2\x89\x20" - "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f" - "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1" - "\x78\x0f\x83\x1a\xb1\x25\xbc\x53" - "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5" - "\x39\xd0\x67\xfe\x72\x09\xa0\x14" - "\xab\x42\xd9\x4d\xe4\x7b\x12\x86" - "\x1d\xb4\x28\xbf\x56\xed\x61\xf8" - "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a" - "\x01\x75\x0c\xa3\x17\xae\x45\xdc" - "\x50\xe7\x7e\x15\x89\x20\xb7\x2b" - "\xc2\x59\xf0\x64\xfb\x92\x06\x9d" - "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f" - "\xa6\x1a\xb1\x48\xdf\x53\xea\x81" - "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3" - "\x67\xfe\x95\x09\xa0\x37\xce\x42" - "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4" - "\x4b\xe2\x56\xed\x84\x1b\x8f\x26" - "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98", - 
.psize = 2048, - .digest = (u8 *)(u16 []){ 0x23ca }, - } -}; - /* * Streebog test vectors from RFC 6986 and GOST R 34.11-2012 */ @@ -5548,65 +6133,6 @@ static const struct hash_testvec hmac_streebog512_tv_template[] = { }, }; -/* - * SM2 test vectors. - */ -static const struct akcipher_testvec sm2_tv_template[] = { - { /* Generated from openssl */ - .key = - "\x04" - "\x8e\xa0\x33\x69\x91\x7e\x3d\xec\xad\x8e\xf0\x45\x5e\x13\x3e\x68" - "\x5b\x8c\xab\x5c\xc6\xc8\x50\xdf\x91\x00\xe0\x24\x73\x4d\x31\xf2" - "\x2e\xc0\xd5\x6b\xee\xda\x98\x93\xec\xd8\x36\xaa\xb9\xcf\x63\x82" - "\xef\xa7\x1a\x03\xed\x16\xba\x74\xb8\x8b\xf9\xe5\x70\x39\xa4\x70", - .key_len = 65, - .param_len = 0, - .c = - "\x30\x45" - "\x02\x20" - "\x70\xab\xb6\x7d\xd6\x54\x80\x64\x42\x7e\x2d\x05\x08\x36\xc9\x96" - "\x25\xc2\xbb\xff\x08\xe5\x43\x15\x5e\xf3\x06\xd9\x2b\x2f\x0a\x9f" - "\x02\x21" - "\x00" - "\xbf\x21\x5f\x7e\x5d\x3f\x1a\x4d\x8f\x84\xc2\xe9\xa6\x4c\xa4\x18" - "\xb2\xb8\x46\xf4\x32\x96\xfa\x57\xc6\x29\xd4\x89\xae\xcc\xda\xdb", - .c_size = 71, - .algo = OID_SM2_with_SM3, - .m = - "\x47\xa7\xbf\xd3\xda\xc4\x79\xee\xda\x8b\x4f\xe8\x40\x94\xd4\x32" - "\x8f\xf1\xcd\x68\x4d\xbd\x9b\x1d\xe0\xd8\x9a\x5d\xad\x85\x47\x5c", - .m_size = 32, - .public_key_vec = true, - .siggen_sigver_test = true, - }, - { /* From libgcrypt */ - .key = - "\x04" - "\x87\x59\x38\x9a\x34\xaa\xad\x07\xec\xf4\xe0\xc8\xc2\x65\x0a\x44" - "\x59\xc8\xd9\x26\xee\x23\x78\x32\x4e\x02\x61\xc5\x25\x38\xcb\x47" - "\x75\x28\x10\x6b\x1e\x0b\x7c\x8d\xd5\xff\x29\xa9\xc8\x6a\x89\x06" - "\x56\x56\xeb\x33\x15\x4b\xc0\x55\x60\x91\xef\x8a\xc9\xd1\x7d\x78", - .key_len = 65, - .param_len = 0, - .c = - "\x30\x44" - "\x02\x20" - "\xd9\xec\xef\xe8\x5f\xee\x3c\x59\x57\x8e\x5b\xab\xb3\x02\xe1\x42" - "\x4b\x67\x2c\x0b\x26\xb6\x51\x2c\x3e\xfc\xc6\x49\xec\xfe\x89\xe5" - "\x02\x20" - "\x43\x45\xd0\xa5\xff\xe5\x13\x27\x26\xd0\xec\x37\xad\x24\x1e\x9a" - "\x71\x9a\xa4\x89\xb0\x7e\x0f\xc4\xbb\x2d\x50\xd0\xe5\x7f\x7a\x68", - .c_size = 70, - .algo = OID_SM2_with_SM3, - .m = - "\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff\x00" - "\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0", - .m_size = 32, - .public_key_vec = true, - .siggen_sigver_test = true, - }, -}; - /* Example vectors below taken from * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf * @@ -7729,159 +8255,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { } }; -static const char vmac64_string1[144] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02', - '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03', -}; - -static const char vmac64_string2[144] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'a', 'b', 'c', -}; - -static const char vmac64_string3[144] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', - 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', - 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', - 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', - 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', -}; - -static const char vmac64_string4[33] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm', - 'o', 'p', 'r', 's', 't', 'u', 'w', 'x', - 'z', -}; - -static const char vmac64_string5[143] = { - '\0', 
'\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k', - ']', '%', '9', '2', '7', '!', 'A', -}; - -static const char vmac64_string6[145] = { - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - 'p', 't', '*', '7', 'l', 'i', '!', '#', - 'w', '0', 'z', '/', '4', 'A', 'n', -}; - -static const struct hash_testvec vmac64_aes_tv_template[] = { - { /* draft-krovetz-vmac-01 test vector 1 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghi", - .psize = 16, - .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b", - }, { /* draft-krovetz-vmac-01 test vector 2 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc", - .psize = 19, - .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5", - }, { /* draft-krovetz-vmac-01 test vector 3 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghi" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", - .psize = 64, - .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98", - }, { /* draft-krovetz-vmac-01 test vector 4 */ - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\0\0\0\0\0\0\0\0bcdefghi" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" - "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", - .psize = 316, - .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 16, - .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = vmac64_string1, - .psize = sizeof(vmac64_string1), - .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = vmac64_string2, - .psize = sizeof(vmac64_string2), - .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9", - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .ksize = 16, - .plaintext = vmac64_string3, - .psize = sizeof(vmac64_string3), - .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 16, - .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = vmac64_string1, - .psize = sizeof(vmac64_string1), - .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = vmac64_string2, - .psize = sizeof(vmac64_string2), - .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11", - }, { - .key = "abcdefghijklmnop", - .ksize = 16, - .plaintext = vmac64_string3, - .psize = sizeof(vmac64_string3), - .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b", - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .ksize = 16, - .plaintext = vmac64_string4, - .psize = sizeof(vmac64_string4), - .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab", - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .ksize = 16, - .plaintext = vmac64_string5, - .psize = sizeof(vmac64_string5), 
- .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25", - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .ksize = 16, - .plaintext = vmac64_string6, - .psize = sizeof(vmac64_string6), - .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4", - }, -}; - /* * SHA384 HMAC test vectors from RFC4231 */ @@ -8463,294 +8836,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = { }, }; -/* - * Poly1305 test vectors from RFC7539 A.3. - */ - -static const struct hash_testvec poly1305_tv_template[] = { - { /* Test Vector #1 */ - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 96, - .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #2 */ - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e" - "\x41\x6e\x79\x20\x73\x75\x62\x6d" - "\x69\x73\x73\x69\x6f\x6e\x20\x74" - "\x6f\x20\x74\x68\x65\x20\x49\x45" - "\x54\x46\x20\x69\x6e\x74\x65\x6e" - "\x64\x65\x64\x20\x62\x79\x20\x74" - "\x68\x65\x20\x43\x6f\x6e\x74\x72" - "\x69\x62\x75\x74\x6f\x72\x20\x66" - "\x6f\x72\x20\x70\x75\x62\x6c\x69" - "\x63\x61\x74\x69\x6f\x6e\x20\x61" - "\x73\x20\x61\x6c\x6c\x20\x6f\x72" - "\x20\x70\x61\x72\x74\x20\x6f\x66" - "\x20\x61\x6e\x20\x49\x45\x54\x46" - "\x20\x49\x6e\x74\x65\x72\x6e\x65" - "\x74\x2d\x44\x72\x61\x66\x74\x20" - "\x6f\x72\x20\x52\x46\x43\x20\x61" - "\x6e\x64\x20\x61\x6e\x79\x20\x73" - "\x74\x61\x74\x65\x6d\x65\x6e\x74" - "\x20\x6d\x61\x64\x65\x20\x77\x69" - "\x74\x68\x69\x6e\x20\x74\x68\x65" - "\x20\x63\x6f\x6e\x74\x65\x78\x74" - "\x20\x6f\x66\x20\x61\x6e\x20\x49" - "\x45\x54\x46\x20\x61\x63\x74\x69" - "\x76\x69\x74\x79\x20\x69\x73\x20" - "\x63\x6f\x6e\x73\x69\x64\x65\x72" - "\x65\x64\x20\x61\x6e\x20\x22\x49" - "\x45\x54\x46\x20\x43\x6f\x6e\x74" - "\x72\x69\x62\x75\x74\x69\x6f\x6e" - "\x22\x2e\x20\x53\x75\x63\x68\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x63\x6c\x75" - "\x64\x65\x20\x6f\x72\x61\x6c\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x20\x49\x45" - "\x54\x46\x20\x73\x65\x73\x73\x69" - "\x6f\x6e\x73\x2c\x20\x61\x73\x20" - "\x77\x65\x6c\x6c\x20\x61\x73\x20" - "\x77\x72\x69\x74\x74\x65\x6e\x20" - "\x61\x6e\x64\x20\x65\x6c\x65\x63" - "\x74\x72\x6f\x6e\x69\x63\x20\x63" - "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" - "\x74\x69\x6f\x6e\x73\x20\x6d\x61" - "\x64\x65\x20\x61\x74\x20\x61\x6e" - "\x79\x20\x74\x69\x6d\x65\x20\x6f" - "\x72\x20\x70\x6c\x61\x63\x65\x2c" - "\x20\x77\x68\x69\x63\x68\x20\x61" - "\x72\x65\x20\x61\x64\x64\x72\x65" - "\x73\x73\x65\x64\x20\x74\x6f", - .psize = 407, - .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e", - }, { /* Test Vector #3 */ - .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x41\x6e\x79\x20\x73\x75\x62\x6d" - "\x69\x73\x73\x69\x6f\x6e\x20\x74" - "\x6f\x20\x74\x68\x65\x20\x49\x45" - "\x54\x46\x20\x69\x6e\x74\x65\x6e" - "\x64\x65\x64\x20\x62\x79\x20\x74" - "\x68\x65\x20\x43\x6f\x6e\x74\x72" - "\x69\x62\x75\x74\x6f\x72\x20\x66" - 
"\x6f\x72\x20\x70\x75\x62\x6c\x69" - "\x63\x61\x74\x69\x6f\x6e\x20\x61" - "\x73\x20\x61\x6c\x6c\x20\x6f\x72" - "\x20\x70\x61\x72\x74\x20\x6f\x66" - "\x20\x61\x6e\x20\x49\x45\x54\x46" - "\x20\x49\x6e\x74\x65\x72\x6e\x65" - "\x74\x2d\x44\x72\x61\x66\x74\x20" - "\x6f\x72\x20\x52\x46\x43\x20\x61" - "\x6e\x64\x20\x61\x6e\x79\x20\x73" - "\x74\x61\x74\x65\x6d\x65\x6e\x74" - "\x20\x6d\x61\x64\x65\x20\x77\x69" - "\x74\x68\x69\x6e\x20\x74\x68\x65" - "\x20\x63\x6f\x6e\x74\x65\x78\x74" - "\x20\x6f\x66\x20\x61\x6e\x20\x49" - "\x45\x54\x46\x20\x61\x63\x74\x69" - "\x76\x69\x74\x79\x20\x69\x73\x20" - "\x63\x6f\x6e\x73\x69\x64\x65\x72" - "\x65\x64\x20\x61\x6e\x20\x22\x49" - "\x45\x54\x46\x20\x43\x6f\x6e\x74" - "\x72\x69\x62\x75\x74\x69\x6f\x6e" - "\x22\x2e\x20\x53\x75\x63\x68\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x63\x6c\x75" - "\x64\x65\x20\x6f\x72\x61\x6c\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x20\x49\x45" - "\x54\x46\x20\x73\x65\x73\x73\x69" - "\x6f\x6e\x73\x2c\x20\x61\x73\x20" - "\x77\x65\x6c\x6c\x20\x61\x73\x20" - "\x77\x72\x69\x74\x74\x65\x6e\x20" - "\x61\x6e\x64\x20\x65\x6c\x65\x63" - "\x74\x72\x6f\x6e\x69\x63\x20\x63" - "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" - "\x74\x69\x6f\x6e\x73\x20\x6d\x61" - "\x64\x65\x20\x61\x74\x20\x61\x6e" - "\x79\x20\x74\x69\x6d\x65\x20\x6f" - "\x72\x20\x70\x6c\x61\x63\x65\x2c" - "\x20\x77\x68\x69\x63\x68\x20\x61" - "\x72\x65\x20\x61\x64\x64\x72\x65" - "\x73\x73\x65\x64\x20\x74\x6f", - .psize = 407, - .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf" - "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0", - }, { /* Test Vector #4 */ - .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" - "\x27\x54\x77\x61\x73\x20\x62\x72" - "\x69\x6c\x6c\x69\x67\x2c\x20\x61" - "\x6e\x64\x20\x74\x68\x65\x20\x73" - "\x6c\x69\x74\x68\x79\x20\x74\x6f" - "\x76\x65\x73\x0a\x44\x69\x64\x20" - "\x67\x79\x72\x65\x20\x61\x6e\x64" - "\x20\x67\x69\x6d\x62\x6c\x65\x20" - "\x69\x6e\x20\x74\x68\x65\x20\x77" - "\x61\x62\x65\x3a\x0a\x41\x6c\x6c" - "\x20\x6d\x69\x6d\x73\x79\x20\x77" - "\x65\x72\x65\x20\x74\x68\x65\x20" - "\x62\x6f\x72\x6f\x67\x6f\x76\x65" - "\x73\x2c\x0a\x41\x6e\x64\x20\x74" - "\x68\x65\x20\x6d\x6f\x6d\x65\x20" - "\x72\x61\x74\x68\x73\x20\x6f\x75" - "\x74\x67\x72\x61\x62\x65\x2e", - .psize = 159, - .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61" - "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62", - }, { /* Test Vector #5 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .psize = 48, - .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #6 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 48, - .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #7 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xf0\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - 
"\x11\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 80, - .digest = "\x05\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #8 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe" - "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .psize = 80, - .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #9 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xfd\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .psize = 48, - .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - }, { /* Test Vector #10 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x04\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x33\x94\xd7\x50\x5e\x43\x79\xcd" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 96, - .digest = "\x14\x00\x00\x00\x00\x00\x00\x00" - "\x55\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #11 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x04\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x33\x94\xd7\x50\x5e\x43\x79\xcd" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 80, - .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Regression test for overflow in AVX2 implementation */ - .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - 
"\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff", - .psize = 300, - .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8" - "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1", - } -}; - /* NHPoly1305 test vectors from https://github.com/google/adiantum */ static const struct hash_testvec nhpoly1305_tv_template[] = { { @@ -23517,42 +23602,6 @@ static const struct aead_testvec aegis128_tv_template[] = { }; /* - * All key wrapping test vectors taken from - * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip - * - * Note: as documented in keywrap.c, the ivout for encryption is the first - * semiblock of the ciphertext from the test vector. For decryption, iv is - * the first semiblock of the ciphertext. - */ -static const struct cipher_testvec aes_kw_tv_template[] = { - { - .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2" - "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6", - .klen = 16, - .ptext = "\x42\x13\x6d\x3c\x38\x4a\x3e\xea" - "\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f", - .ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3" - "\xf5\x6f\xab\xea\x25\x48\xf5\xfb", - .len = 16, - .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d", - .generates_iv = true, - }, { - .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b" - "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71" - "\x03\x86\xf9\x32\x78\x6e\xf7\x96" - "\x76\xfa\xfb\x90\xb8\x26\x3c\x5f", - .klen = 32, - .ptext = "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa" - "\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa", - .ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15" - "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43", - .len = 16, - .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1", - .generates_iv = true, - }, -}; - -/* * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode) * test vectors, taken from Appendix B.2.9 and B.2.10: * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf @@ -38254,4 +38303,355 @@ static const struct cipher_testvec aes_hctr2_tv_template[] = { }; +#ifdef __LITTLE_ENDIAN +#define AUTHENC_KEY_HEADER(enckeylen) \ + "\x08\x00\x01\x00" /* LE rtattr */ \ + enckeylen /* crypto_authenc_key_param */ +#else +#define AUTHENC_KEY_HEADER(enckeylen) \ + "\x00\x08\x00\x01" /* BE rtattr */ \ + enckeylen /* crypto_authenc_key_param */ +#endif + +static const struct aead_testvec krb5_test_aes128_cts_hmac_sha256_128[] = { + /* rfc8009 Appendix A */ + { + /* "enc no plain" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x7E\x58\x95\xEA\xF2\x67\x24\x35\xBA\xD8\x17\xF5\x45\xA3\x71\x48" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\xEF\x85\xFB\x89\x0B\xB8\x47\x2F\x4D\xAB\x20\x39\x4D\xCA\x78\x1D" + "\xAD\x87\x7E\xDA\x39\xD5\x0C\x87\x0C\x0D\x5A\x0A\x8E\x48\xC7\x18", + .clen = 16 + 0 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain<block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x7B\xCA\x28\x5E\x2F\xD4\x13\x0F\xB5\x5B\x1A\x5C\x83\xBC\x5B\x24" // Confounder + "\x00\x01\x02\x03\x04\x05", // Plain + .plen = 16 + 6, + .ctext = + "\x84\xD7\xF3\x07\x54\xED\x98\x7B\xAB\x0B\xF3\x50\x6B\xEB\x09\xCF" + "\xB5\x54\x02\xCE\xF7\xE6\x87\x7C\xE9\x9E\x24\x7E\x52\xD1\x6E\xD4" + "\x42\x1D\xFD\xF8\x97\x6C", + .clen = 16 + 6 + 16, + .assoc 
= "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain==block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x56\xAB\x21\x71\x3F\xF6\x2C\x0A\x14\x57\x20\x0F\x6F\xA9\x94\x8F" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain + .plen = 16 + 16, + .ctext = + "\x35\x17\xD6\x40\xF5\x0D\xDC\x8A\xD3\x62\x87\x22\xB3\x56\x9D\x2A" + "\xE0\x74\x93\xFA\x82\x63\x25\x40\x80\xEA\x65\xC1\x00\x8E\x8F\xC2" + "\x95\xFB\x48\x52\xE7\xD8\x3E\x1E\x7C\x48\xC3\x7E\xEB\xE6\xB0\xD3", + .clen = 16 + 16 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain>block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\xA7\xA4\xE2\x9A\x47\x28\xCE\x10\x66\x4F\xB6\x4E\x49\xAD\x3F\xAC" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x10\x11\x12\x13\x14", // Plain + .plen = 16 + 21, + .ctext = + "\x72\x0F\x73\xB1\x8D\x98\x59\xCD\x6C\xCB\x43\x46\x11\x5C\xD3\x36" + "\xC7\x0F\x58\xED\xC0\xC4\x43\x7C\x55\x73\x54\x4C\x31\xC8\x13\xBC" + "\xE1\xE6\xD0\x72\xC1\x86\xB3\x9A\x41\x3C\x2F\x92\xCA\x9B\x83\x34" + "\xA2\x87\xFF\xCB\xFC", + .clen = 16 + 21 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, +}; + +static const struct aead_testvec krb5_test_aes256_cts_hmac_sha384_192[] = { + /* rfc8009 Appendix A */ + { + /* "enc no plain" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\xF7\x64\xE9\xFA\x15\xC2\x76\x47\x8B\x2C\x7D\x0C\x4E\x5F\x58\xE4" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\x41\xF5\x3F\xA5\xBF\xE7\x02\x6D\x91\xFA\xF9\xBE\x95\x91\x95\xA0" + "\x58\x70\x72\x73\xA9\x6A\x40\xF0\xA0\x19\x60\x62\x1A\xC6\x12\x74" + "\x8B\x9B\xBF\xBE\x7E\xB4\xCE\x3C", + .clen = 16 + 0 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain<block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\xB8\x0D\x32\x51\xC1\xF6\x47\x14\x94\x25\x6F\xFE\x71\x2D\x0B\x9A" // Confounder + "\x00\x01\x02\x03\x04\x05", // Plain + .plen = 16 + 6, + .ctext = + "\x4E\xD7\xB3\x7C\x2B\xCA\xC8\xF7\x4F\x23\xC1\xCF\x07\xE6\x2B\xC7" + "\xB7\x5F\xB3\xF6\x37\xB9\xF5\x59\xC7\xF6\x64\xF6\x9E\xAB\x7B\x60" + "\x92\x23\x75\x26\xEA\x0D\x1F\x61\xCB\x20\xD6\x9D\x10\xF2", + .clen = 16 + 6 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain==block" */ + .key = + 
AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\x53\xBF\x8A\x0D\x10\x52\x65\xD4\xE2\x76\x42\x86\x24\xCE\x5E\x63" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain + .plen = 16 + 16, + .ctext = + "\xBC\x47\xFF\xEC\x79\x98\xEB\x91\xE8\x11\x5C\xF8\xD1\x9D\xAC\x4B" + "\xBB\xE2\xE1\x63\xE8\x7D\xD3\x7F\x49\xBE\xCA\x92\x02\x77\x64\xF6" + "\x8C\xF5\x1F\x14\xD7\x98\xC2\x27\x3F\x35\xDF\x57\x4D\x1F\x93\x2E" + "\x40\xC4\xFF\x25\x5B\x36\xA2\x66", + .clen = 16 + 16 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain>block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\x76\x3E\x65\x36\x7E\x86\x4F\x02\xF5\x51\x53\xC7\xE3\xB5\x8A\xF1" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x10\x11\x12\x13\x14", // Plain + .plen = 16 + 21, + .ctext = + "\x40\x01\x3E\x2D\xF5\x8E\x87\x51\x95\x7D\x28\x78\xBC\xD2\xD6\xFE" + "\x10\x1C\xCF\xD5\x56\xCB\x1E\xAE\x79\xDB\x3C\x3E\xE8\x64\x29\xF2" + "\xB2\xA6\x02\xAC\x86\xFE\xF6\xEC\xB6\x47\xD6\x29\x5F\xAE\x07\x7A" + "\x1F\xEB\x51\x75\x08\xD2\xC1\x6B\x41\x92\xE0\x1F\x62", + .clen = 16 + 21 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, +}; + +static const struct aead_testvec krb5_test_camellia_cts_cmac[] = { + /* rfc6803 sec 10 */ + { + // "enc no plain" + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x45\xeb\x66\xe2\xef\xa8\x77\x8f\x7d\xf1\x46\x54\x53\x05\x98\x06" // Ki + "\xe9\x9b\x82\xb3\x6c\x4a\xe8\xea\x19\xe9\x5d\xfa\x9e\xde\x88\x2c", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xB6\x98\x22\xA1\x9A\x6B\x09\xC0\xEB\xC8\x55\x7D\x1F\x1B\x6C\x0A" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\xC4\x66\xF1\x87\x10\x69\x92\x1E\xDB\x7C\x6F\xDE\x24\x4A\x52\xDB" + "\x0B\xA1\x0E\xDC\x19\x7B\xDB\x80\x06\x65\x8C\xA3\xCC\xCE\x6E\xB8", + .clen = 16 + 0 + 16, + }, { + // "enc 1 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x13\x5f\xe7\x11\x6f\x53\xc2\xaa\x36\x12\xb7\xea\xe0\xf2\x84\xaa" // Ki + "\xa7\xed\xcd\x53\x97\xea\x6d\x12\xb0\xaf\xf4\xcb\x8d\xaa\x57\xad", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\x6F\x2F\xC3\xC2\xA1\x66\xFD\x88\x98\x96\x7A\x83\xDE\x95\x96\xD9" // Confounder + "1", // Plain + .plen = 16 + 1, + .ctext = + "\x84\x2D\x21\xFD\x95\x03\x11\xC0\xDD\x46\x4A\x3F\x4B\xE8\xD6\xDA" + "\x88\xA5\x6D\x55\x9C\x9B\x47\xD3\xF9\xA8\x50\x67\xAF\x66\x15\x59" + "\xB8", + .clen = 16 + 1 + 16, + }, { + // "enc 9 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x10\x2c\x34\xd0\x75\x74\x9f\x77\x8a\x15\xca\xd1\xe9\x7d\xa9\x86" // Ki + "\xdd\xe4\x2e\xca\x7c\xd9\x86\x3f\xc3\xce\x89\xcb\xc9\x43\x62\xd7", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xA5\xB4\xA7\x1E\x07\x7A\xEE\xF9\x3C\x87\x63\xC1\x8F\xDB\x1F\x10" // Confounder + "9 bytesss", // Plain + .plen = 16 + 9, + .ctext = + 
"\x61\x9F\xF0\x72\xE3\x62\x86\xFF\x0A\x28\xDE\xB3\xA3\x52\xEC\x0D" + "\x0E\xDF\x5C\x51\x60\xD6\x63\xC9\x01\x75\x8C\xCF\x9D\x1E\xD3\x3D" + "\x71\xDB\x8F\x23\xAA\xBF\x83\x48\xA0", + .clen = 16 + 9 + 16, + }, { + // "enc 13 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\xb8\xc4\x38\xcc\x1a\x00\x60\xfc\x91\x3a\x8e\x07\x16\x96\xbd\x08" // Ki + "\xc3\x11\x3a\x25\x85\x90\xb9\xae\xbf\x72\x1b\x1a\xf6\xb0\xcb\xf8", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\x19\xFE\xE4\x0D\x81\x0C\x52\x4B\x5B\x22\xF0\x18\x74\xC6\x93\xDA" // Confounder + "13 bytes byte", // Plain + .plen = 16 + 13, + .ctext = + "\xB8\xEC\xA3\x16\x7A\xE6\x31\x55\x12\xE5\x9F\x98\xA7\xC5\x00\x20" + "\x5E\x5F\x63\xFF\x3B\xB3\x89\xAF\x1C\x41\xA2\x1D\x64\x0D\x86\x15" + "\xC9\xED\x3F\xBE\xB0\x5A\xB6\xAC\xB6\x76\x89\xB5\xEA", + .clen = 16 + 13 + 16, + }, { + // "enc 30 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x18\xaf\x19\xb0\x23\x74\x44\xfd\x75\x04\xad\x7d\xbd\x48\xad\xd3" // Ki + "\x8b\x07\xee\xd3\x01\x49\x91\x6a\xa2\x0d\xb3\xf5\xce\xd8\xaf\xad", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xCA\x7A\x7A\xB4\xBE\x19\x2D\xAB\xD6\x03\x50\x6D\xB1\x9C\x39\xE2" // Confounder + "30 bytes bytes bytes bytes byt", // Plain + .plen = 16 + 30, + .ctext = + "\xA2\x6A\x39\x05\xA4\xFF\xD5\x81\x6B\x7B\x1E\x27\x38\x0D\x08\x09" + "\x0C\x8E\xC1\xF3\x04\x49\x6E\x1A\xBD\xCD\x2B\xDC\xD1\xDF\xFC\x66" + "\x09\x89\xE1\x17\xA7\x13\xDD\xBB\x57\xA4\x14\x6C\x15\x87\xCB\xA4" + "\x35\x66\x65\x59\x1D\x22\x40\x28\x2F\x58\x42\xB1\x05\xA5", + .clen = 16 + 30 + 16, + }, { + // "enc no plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\xa2\xb8\x33\xe9\x43\xbb\x10\xee\x53\xb4\xa1\x9b\xc2\xbb\xc7\xe1" + "\x9b\x87\xad\x5d\xe9\x21\x22\xa4\x33\x8b\xe6\xf7\x32\xfd\x8a\x0e" // Ki + "\x6c\xcb\x3f\x25\xd8\xae\x57\xf4\xe8\xf6\xca\x47\x4b\xdd\xef\xf1" + "\x16\xce\x13\x1b\x3f\x71\x01\x2e\x75\x6d\x6b\x1e\x3f\x70\xa7\xf1", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\x3C\xBB\xD2\xB4\x59\x17\x94\x10\x67\xF9\x65\x99\xBB\x98\x92\x6C" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\x03\x88\x6D\x03\x31\x0B\x47\xA6\xD8\xF0\x6D\x7B\x94\xD1\xDD\x83" + "\x7E\xCC\xE3\x15\xEF\x65\x2A\xFF\x62\x08\x59\xD9\x4A\x25\x92\x66", + .clen = 16 + 0 + 16, + }, { + // "enc 1 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x84\x61\x4b\xfa\x98\xf1\x74\x8a\xa4\xaf\x99\x2b\x8c\x26\x28\x0d" + "\xc8\x98\x73\x29\xdf\x77\x5c\x1d\xb0\x4a\x43\xf1\x21\xaa\x86\x65" // Ki + "\xe9\x31\x73\xaa\x01\xeb\x3c\x24\x62\x31\xda\xfc\x78\x02\xee\x32" + "\xaf\x24\x85\x1d\x8c\x73\x87\xd1\x8c\xb9\xb2\xc5\xb7\xf5\x70\xb8", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xDE\xF4\x87\xFC\xEB\xE6\xDE\x63\x46\xD4\xDA\x45\x21\xBB\xA2\xD2" // Confounder + "1", // Plain + .plen = 16 + 1, + .ctext = + "\x2C\x9C\x15\x70\x13\x3C\x99\xBF\x6A\x34\xBC\x1B\x02\x12\x00\x2F" + "\xD1\x94\x33\x87\x49\xDB\x41\x35\x49\x7A\x34\x7C\xFC\xD9\xD1\x8A" + "\x12", + .clen = 16 + 1 + 16, + }, { + // "enc 9 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x47\xb9\xf5\xba\xd7\x63\x00\x58\x2a\x54\x45\xfa\x0c\x1b\x29\xc3" + "\xaa\x83\xec\x63\xb9\x0b\x4a\xb0\x08\x48\xc1\x85\x67\x4f\x44\xa7" // Ki + "\xcd\xa2\xd3\x9a\x9b\x24\x3f\xfe\xb5\x6e\x8d\x5f\x4b\xd5\x28\x74" + "\x1e\xcb\x52\x0c\x62\x12\x3f\xb0\x40\xb8\x41\x8b\x15\xc7\xd7\x0c", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xAD\x4F\xF9\x04\xD3\x4E\x55\x53\x84\xB1\x41\x00\xFC\x46\x5F\x88" // Confounder + "9 bytesss", // Plain + .plen = 16 + 9, + .ctext = + "\x9C\x6D\xE7\x5F\x81\x2D\xE7\xED\x0D\x28\xB2\x96\x35\x57\xA1\x15" + 
"\x64\x09\x98\x27\x5B\x0A\xF5\x15\x27\x09\x91\x3F\xF5\x2A\x2A\x9C" + "\x8E\x63\xB8\x72\xF9\x2E\x64\xC8\x39", + .clen = 16 + 9 + 16, + }, { + // "enc 13 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x15\x2f\x8c\x9d\xc9\x85\x79\x6e\xb1\x94\xed\x14\xc5\x9e\xac\xdd" + "\x41\x8a\x33\x32\x36\xb7\x8f\xaf\xa7\xc7\x9b\x04\xe0\xac\xe7\xbf" // Ki + "\xcd\x8a\x10\xe2\x79\xda\xdd\xb6\x90\x1e\xc3\x0b\xdf\x98\x73\x25" + "\x0f\x6e\xfc\x6a\x77\x36\x7d\x74\xdc\x3e\xe7\xf7\x4b\xc7\x77\x4e", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xCF\x9B\xCA\x6D\xF1\x14\x4E\x0C\x0A\xF9\xB8\xF3\x4C\x90\xD5\x14" // Confounder + "13 bytes byte", + .plen = 16 + 13, + .ctext = + "\xEE\xEC\x85\xA9\x81\x3C\xDC\x53\x67\x72\xAB\x9B\x42\xDE\xFC\x57" + "\x06\xF7\x26\xE9\x75\xDD\xE0\x5A\x87\xEB\x54\x06\xEA\x32\x4C\xA1" + "\x85\xC9\x98\x6B\x42\xAA\xBE\x79\x4B\x84\x82\x1B\xEE", + .clen = 16 + 13 + 16, + }, { + // "enc 30 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x04\x8d\xeb\xf7\xb1\x2c\x09\x32\xe8\xb2\x96\x99\x6c\x23\xf8\xb7" + "\x9d\x59\xb9\x7e\xa1\x19\xfc\x0c\x15\x6b\xf7\x88\xdc\x8c\x85\xe8" // Ki + "\x1d\x51\x47\xf3\x4b\xb0\x01\xa0\x4a\x68\xa7\x13\x46\xe7\x65\x4e" + "\x02\x23\xa6\x0d\x90\xbc\x2b\x79\xb4\xd8\x79\x56\xd4\x7c\xd4\x2a", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\x64\x4D\xEF\x38\xDA\x35\x00\x72\x75\x87\x8D\x21\x68\x55\xE2\x28" // Confounder + "30 bytes bytes bytes bytes byt", // Plain + .plen = 16 + 30, + .ctext = + "\x0E\x44\x68\x09\x85\x85\x5F\x2D\x1F\x18\x12\x52\x9C\xA8\x3B\xFD" + "\x8E\x34\x9D\xE6\xFD\x9A\xDA\x0B\xAA\xA0\x48\xD6\x8E\x26\x5F\xEB" + "\xF3\x4A\xD1\x25\x5A\x34\x49\x99\xAD\x37\x14\x68\x87\xA6\xC6\x84" + "\x57\x31\xAC\x7F\x46\x37\x6A\x05\x04\xCD\x06\x57\x14\x74", + .clen = 16 + 30 + 16, + }, +}; + #endif /* _CRYPTO_TESTMGR_H */ diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index 557915e4062d..368018cfa9bf 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -24,7 +24,7 @@ * Third Edition. */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/algapi.h> #include <crypto/twofish.h> #include <linux/module.h> @@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(twofish_mod_init); +module_init(twofish_mod_init); module_exit(twofish_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/vmac.c b/crypto/vmac.c deleted file mode 100644 index 0a1d8efa6c1a..000000000000 --- a/crypto/vmac.c +++ /dev/null @@ -1,696 +0,0 @@ -/* - * VMAC: Message Authentication Code using Universal Hashing - * - * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01 - * - * Copyright (c) 2009, Intel Corporation. - * Copyright (c) 2018, Google Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - */ - -/* - * Derived from: - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. 
- * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Last modified: 17 APR 08, 1700 PDT - */ - -#include <asm/unaligned.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/crypto.h> -#include <linux/module.h> -#include <linux/scatterlist.h> -#include <asm/byteorder.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/hash.h> - -/* - * User definable settings. - */ -#define VMAC_TAG_LEN 64 -#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ -#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) -#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ -#define VMAC_NONCEBYTES 16 - -/* per-transform (per-key) context */ -struct vmac_tfm_ctx { - struct crypto_cipher *cipher; - u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; - u64 polykey[2*VMAC_TAG_LEN/64]; - u64 l3key[2*VMAC_TAG_LEN/64]; -}; - -/* per-request context */ -struct vmac_desc_ctx { - union { - u8 partial[VMAC_NHBYTES]; /* partial block */ - __le64 partial_words[VMAC_NHBYTES / 8]; - }; - unsigned int partial_size; /* size of the partial block */ - bool first_block_processed; - u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */ - union { - u8 bytes[VMAC_NONCEBYTES]; - __be64 pads[VMAC_NONCEBYTES / 8]; - } nonce; - unsigned int nonce_size; /* nonce bytes filled so far */ -}; - -/* - * Constants and masks - */ -#define UINT64_C(x) x##ULL -static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ -static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ -static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ -static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ -static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ - -#define pe64_to_cpup le64_to_cpup /* Prefer little endian */ - -#ifdef __LITTLE_ENDIAN -#define INDEX_HIGH 1 -#define INDEX_LOW 0 -#else -#define INDEX_HIGH 0 -#define INDEX_LOW 1 -#endif - -/* - * The following routines are used in this implementation. They are - * written via macros to simulate zero-overhead call-by-reference. - * - * MUL64: 64x64->128-bit multiplication - * PMUL64: assumes top bits cleared on inputs - * ADD128: 128x128->128-bit addition - */ - -#define ADD128(rh, rl, ih, il) \ - do { \ - u64 _il = (il); \ - (rl) += (_il); \ - if ((rl) < (_il)) \ - (rh)++; \ - (rh) += (ih); \ - } while (0) - -#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) - -#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ - do { \ - u64 _i1 = (i1), _i2 = (i2); \ - u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ - rh = MUL32(_i1>>32, _i2>>32); \ - rl = MUL32(_i1, _i2); \ - ADD128(rh, rl, (m >> 32), (m << 32)); \ - } while (0) - -#define MUL64(rh, rl, i1, i2) \ - do { \ - u64 _i1 = (i1), _i2 = (i2); \ - u64 m1 = MUL32(_i1, _i2>>32); \ - u64 m2 = MUL32(_i1>>32, _i2); \ - rh = MUL32(_i1>>32, _i2>>32); \ - rl = MUL32(_i1, _i2); \ - ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ - ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ - } while (0) - -/* - * For highest performance the L1 NH and L2 polynomial hashes should be - * carefully implemented to take advantage of one's target architecture. - * Here these two hash functions are defined multiple time; once for - * 64-bit architectures, once for 32-bit SSE2 architectures, and once - * for the rest (32-bit) architectures. - * For each, nh_16 *must* be defined (works on multiples of 16 bytes). 
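The MUL64/PMUL64/ADD128 macros above assemble a 64x64 to 128-bit multiply from four 32-bit partial products with explicit carry propagation. A standalone sketch of the same decomposition, checked against a compiler-provided 128-bit type (this assumes GCC/Clang's unsigned __int128 and is not part of the kernel code):

#include <assert.h>
#include <stdint.h>

/* 64x64 -> 128 from four 32-bit partial products, as MUL64 does. */
static void mul64(uint64_t *rh, uint64_t *rl, uint64_t a, uint64_t b)
{
	uint64_t m1 = (a & 0xffffffff) * (b >> 32);	/* lo(a) * hi(b) */
	uint64_t m2 = (a >> 32) * (b & 0xffffffff);	/* hi(a) * lo(b) */

	*rh = (a >> 32) * (b >> 32);
	*rl = (a & 0xffffffff) * (b & 0xffffffff);

	/* ADD128: fold each cross term in, carrying into the high word. */
	*rl += m1 << 32;
	*rh += (m1 >> 32) + (*rl < (uint64_t)(m1 << 32));
	*rl += m2 << 32;
	*rh += (m2 >> 32) + (*rl < (uint64_t)(m2 << 32));
}

int main(void)
{
	uint64_t a = 0xfffffffffffffeffULL;	/* p64 from the code above */
	uint64_t b = 0x0123456789abcdefULL, h, l;

	mul64(&h, &l, a, b);
	assert((((unsigned __int128)h << 64) | l) ==
	       (unsigned __int128)a * b);
	return 0;
}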
- * Optionally, nh_vmac_nhbytes can be defined (for multiples of - * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two - * NH computations at once). - */ - -#ifdef CONFIG_64BIT - -#define nh_16(mp, kp, nw, rh, rl) \ - do { \ - int i; u64 th, tl; \ - rh = rl = 0; \ - for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - } \ - } while (0) - -#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ - do { \ - int i; u64 th, tl; \ - rh1 = rl1 = rh = rl = 0; \ - for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ - ADD128(rh1, rl1, th, tl); \ - } \ - } while (0) - -#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ -#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ - do { \ - int i; u64 th, tl; \ - rh = rl = 0; \ - for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ - pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ - pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ - ADD128(rh, rl, th, tl); \ - } \ - } while (0) - -#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ - do { \ - int i; u64 th, tl; \ - rh1 = rl1 = rh = rl = 0; \ - for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ - ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ - pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ - pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ - ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ - pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ - pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ - ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ - pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ - ADD128(rh, rl, th, tl); \ - MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ - pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ - ADD128(rh1, rl1, th, tl); \ - } \ - } while (0) -#endif - -#define poly_step(ah, al, kh, kl, mh, ml) \ - do { \ - u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ - /* compute ab*cd, put bd into result registers */ \ - PMUL64(t3h, t3l, al, kh); \ - PMUL64(t2h, t2l, ah, kl); \ - PMUL64(t1h, t1l, ah, 2*kh); \ - PMUL64(ah, al, al, kl); \ - /* add 2 * ac to result */ \ - ADD128(ah, al, t1h, t1l); \ - /* add together ad + bc */ \ - ADD128(t2h, t2l, t3h, t3l); \ - /* now (ah,al), (t2l,2*t2h) need summing */ \ - /* first add the high registers, carrying into t2h */ \ - ADD128(t2h, ah, z, t2l); \ - /* double t2h and add top bit of ah */ \ - t2h = 2 * t2h + (ah >> 63); \ - ah &= m63; \ - /* now add the low registers */ \ - ADD128(ah, al, mh, ml); \ - ADD128(ah, al, z, t2h); \ - } while (0) - -#else /* ! 
CONFIG_64BIT */ - -#ifndef nh_16 -#define nh_16(mp, kp, nw, rh, rl) \ - do { \ - u64 t1, t2, m1, m2, t; \ - int i; \ - rh = rl = t = 0; \ - for (i = 0; i < nw; i += 2) { \ - t1 = pe64_to_cpup(mp+i) + kp[i]; \ - t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ - m2 = MUL32(t1 >> 32, t2); \ - m1 = MUL32(t1, t2 >> 32); \ - ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ - MUL32(t1, t2)); \ - rh += (u64)(u32)(m1 >> 32) \ - + (u32)(m2 >> 32); \ - t += (u64)(u32)m1 + (u32)m2; \ - } \ - ADD128(rh, rl, (t >> 32), (t << 32)); \ - } while (0) -#endif - -static void poly_step_func(u64 *ahi, u64 *alo, - const u64 *kh, const u64 *kl, - const u64 *mh, const u64 *ml) -{ -#define a0 (*(((u32 *)alo)+INDEX_LOW)) -#define a1 (*(((u32 *)alo)+INDEX_HIGH)) -#define a2 (*(((u32 *)ahi)+INDEX_LOW)) -#define a3 (*(((u32 *)ahi)+INDEX_HIGH)) -#define k0 (*(((u32 *)kl)+INDEX_LOW)) -#define k1 (*(((u32 *)kl)+INDEX_HIGH)) -#define k2 (*(((u32 *)kh)+INDEX_LOW)) -#define k3 (*(((u32 *)kh)+INDEX_HIGH)) - - u64 p, q, t; - u32 t2; - - p = MUL32(a3, k3); - p += p; - p += *(u64 *)mh; - p += MUL32(a0, k2); - p += MUL32(a1, k1); - p += MUL32(a2, k0); - t = (u32)(p); - p >>= 32; - p += MUL32(a0, k3); - p += MUL32(a1, k2); - p += MUL32(a2, k1); - p += MUL32(a3, k0); - t |= ((u64)((u32)p & 0x7fffffff)) << 32; - p >>= 31; - p += (u64)(((u32 *)ml)[INDEX_LOW]); - p += MUL32(a0, k0); - q = MUL32(a1, k3); - q += MUL32(a2, k2); - q += MUL32(a3, k1); - q += q; - p += q; - t2 = (u32)(p); - p >>= 32; - p += (u64)(((u32 *)ml)[INDEX_HIGH]); - p += MUL32(a0, k1); - p += MUL32(a1, k0); - q = MUL32(a2, k3); - q += MUL32(a3, k2); - q += q; - p += q; - *(u64 *)(alo) = (p << 32) | t2; - p >>= 32; - *(u64 *)(ahi) = p + t; - -#undef a0 -#undef a1 -#undef a2 -#undef a3 -#undef k0 -#undef k1 -#undef k2 -#undef k3 -} - -#define poly_step(ah, al, kh, kl, mh, ml) \ - poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) - -#endif /* end of specialized NH and poly definitions */ - -/* At least nh_16 is defined. 
Defined others as needed here */ -#ifndef nh_16_2 -#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ - do { \ - nh_16(mp, kp, nw, rh, rl); \ - nh_16(mp, ((kp)+2), nw, rh2, rl2); \ - } while (0) -#endif -#ifndef nh_vmac_nhbytes -#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ - nh_16(mp, kp, nw, rh, rl) -#endif -#ifndef nh_vmac_nhbytes_2 -#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ - do { \ - nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ - nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ - } while (0) -#endif - -static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) -{ - u64 rh, rl, t, z = 0; - - /* fully reduce (p1,p2)+(len,0) mod p127 */ - t = p1 >> 63; - p1 &= m63; - ADD128(p1, p2, len, t); - /* At this point, (p1,p2) is at most 2^127+(len<<64) */ - t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); - ADD128(p1, p2, z, t); - p1 &= m63; - - /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ - t = p1 + (p2 >> 32); - t += (t >> 32); - t += (u32)t > 0xfffffffeu; - p1 += (t >> 32); - p2 += (p1 << 32); - - /* compute (p1+k1)%p64 and (p2+k2)%p64 */ - p1 += k1; - p1 += (0 - (p1 < k1)) & 257; - p2 += k2; - p2 += (0 - (p2 < k2)) & 257; - - /* compute (p1+k1)*(p2+k2)%p64 */ - MUL64(rh, rl, p1, p2); - t = rh >> 56; - ADD128(t, rl, z, rh); - rh <<= 8; - ADD128(t, rl, z, rh); - t += t << 8; - rl += t; - rl += (0 - (rl < t)) & 257; - rl += (0 - (rl > p64-1)) & 257; - return rl; -} - -/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */ -static void vhash_blocks(const struct vmac_tfm_ctx *tctx, - struct vmac_desc_ctx *dctx, - const __le64 *mptr, unsigned int blocks) -{ - const u64 *kptr = tctx->nhkey; - const u64 pkh = tctx->polykey[0]; - const u64 pkl = tctx->polykey[1]; - u64 ch = dctx->polytmp[0]; - u64 cl = dctx->polytmp[1]; - u64 rh, rl; - - if (!dctx->first_block_processed) { - dctx->first_block_processed = true; - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); - rh &= m62; - ADD128(ch, cl, rh, rl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - blocks--; - } - - while (blocks--) { - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); - rh &= m62; - poly_step(ch, cl, pkh, pkl, rh, rl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - } - - dctx->polytmp[0] = ch; - dctx->polytmp[1] = cl; -} - -static int vmac_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm); - __be64 out[2]; - u8 in[16] = { 0 }; - unsigned int i; - int err; - - if (keylen != VMAC_KEY_LEN) - return -EINVAL; - - err = crypto_cipher_setkey(tctx->cipher, key, keylen); - if (err) - return err; - - /* Fill nh key */ - in[0] = 0x80; - for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) { - crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); - tctx->nhkey[i] = be64_to_cpu(out[0]); - tctx->nhkey[i+1] = be64_to_cpu(out[1]); - in[15]++; - } - - /* Fill poly key */ - in[0] = 0xC0; - in[15] = 0; - for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) { - crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); - tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly; - tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly; - in[15]++; - } - - /* Fill ip key */ - in[0] = 0xE0; - in[15] = 0; - for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) { - do { - crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); - tctx->l3key[i] = be64_to_cpu(out[0]); - tctx->l3key[i+1] = be64_to_cpu(out[1]); - in[15]++; - } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64); - } - - return 0; -} - -static int vmac_init(struct shash_desc *desc) -{ - const struct vmac_tfm_ctx *tctx = 
crypto_shash_ctx(desc->tfm); - struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->partial_size = 0; - dctx->first_block_processed = false; - memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp)); - dctx->nonce_size = 0; - return 0; -} - -static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len) -{ - const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int n; - - /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */ - if (dctx->nonce_size < VMAC_NONCEBYTES) { - n = min(len, VMAC_NONCEBYTES - dctx->nonce_size); - memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n); - dctx->nonce_size += n; - p += n; - len -= n; - } - - if (dctx->partial_size) { - n = min(len, VMAC_NHBYTES - dctx->partial_size); - memcpy(&dctx->partial[dctx->partial_size], p, n); - dctx->partial_size += n; - p += n; - len -= n; - if (dctx->partial_size == VMAC_NHBYTES) { - vhash_blocks(tctx, dctx, dctx->partial_words, 1); - dctx->partial_size = 0; - } - } - - if (len >= VMAC_NHBYTES) { - n = round_down(len, VMAC_NHBYTES); - /* TODO: 'p' may be misaligned here */ - vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES); - p += n; - len -= n; - } - - if (len) { - memcpy(dctx->partial, p, len); - dctx->partial_size = len; - } - - return 0; -} - -static u64 vhash_final(const struct vmac_tfm_ctx *tctx, - struct vmac_desc_ctx *dctx) -{ - unsigned int partial = dctx->partial_size; - u64 ch = dctx->polytmp[0]; - u64 cl = dctx->polytmp[1]; - - /* L1 and L2-hash the final block if needed */ - if (partial) { - /* Zero-pad to next 128-bit boundary */ - unsigned int n = round_up(partial, 16); - u64 rh, rl; - - memset(&dctx->partial[partial], 0, n - partial); - nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl); - rh &= m62; - if (dctx->first_block_processed) - poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1], - rh, rl); - else - ADD128(ch, cl, rh, rl); - } - - /* L3-hash the 128-bit output of L2-hash */ - return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8); -} - -static int vmac_final(struct shash_desc *desc, u8 *out) -{ - const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - int index; - u64 hash, pad; - - if (dctx->nonce_size != VMAC_NONCEBYTES) - return -EINVAL; - - /* - * The VMAC specification requires a nonce at least 1 bit shorter than - * the block cipher's block length, so we actually only accept a 127-bit - * nonce. We define the unused bit to be the first one and require that - * it be 0, so the needed prepending of a 0 bit is implicit. 
- */ - if (dctx->nonce.bytes[0] & 0x80) - return -EINVAL; - - /* Finish calculating the VHASH of the message */ - hash = vhash_final(tctx, dctx); - - /* Generate pseudorandom pad by encrypting the nonce */ - BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8)); - index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1; - dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1; - crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes, - dctx->nonce.bytes); - pad = be64_to_cpu(dctx->nonce.pads[index]); - - /* The VMAC is the sum of VHASH and the pseudorandom pad */ - put_unaligned_be64(hash + pad, out); - return 0; -} - -static int vmac_init_tfm(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); - struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - tctx->cipher = cipher; - return 0; -} - -static void vmac_exit_tfm(struct crypto_tfm *tfm) -{ - struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - - crypto_free_cipher(tctx->cipher); -} - -static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct shash_instance *inst; - struct crypto_cipher_spawn *spawn; - struct crypto_alg *alg; - u32 mask; - int err; - - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); - if (err) - return err; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - spawn = shash_instance_ctx(inst); - - err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, mask); - if (err) - goto err_free_inst; - alg = crypto_spawn_cipher_alg(spawn); - - err = -EINVAL; - if (alg->cra_blocksize != VMAC_NONCEBYTES) - goto err_free_inst; - - err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); - if (err) - goto err_free_inst; - - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = alg->cra_blocksize; - - inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); - inst->alg.base.cra_init = vmac_init_tfm; - inst->alg.base.cra_exit = vmac_exit_tfm; - - inst->alg.descsize = sizeof(struct vmac_desc_ctx); - inst->alg.digestsize = VMAC_TAG_LEN / 8; - inst->alg.init = vmac_init; - inst->alg.update = vmac_update; - inst->alg.final = vmac_final; - inst->alg.setkey = vmac_setkey; - - inst->free = shash_free_singlespawn_instance; - - err = shash_register_instance(tmpl, inst); - if (err) { -err_free_inst: - shash_free_singlespawn_instance(inst); - } - return err; -} - -static struct crypto_template vmac64_tmpl = { - .name = "vmac64", - .create = vmac_create, - .module = THIS_MODULE, -}; - -static int __init vmac_module_init(void) -{ - return crypto_register_template(&vmac64_tmpl); -} - -static void __exit vmac_module_exit(void) -{ - crypto_unregister_template(&vmac64_tmpl); -} - -subsys_initcall(vmac_module_init); -module_exit(vmac_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("VMAC hash algorithm"); -MODULE_ALIAS_CRYPTO("vmac64"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/wp512.c b/crypto/wp512.c index 07994e5ebf4e..229b189a7988 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -21,10 +21,10 @@ */ #include <crypto/internal/hash.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/mm.h> -#include <asm/byteorder.h> -#include <linux/types.h> +#include <linux/string.h> +#include <linux/unaligned.h> #define 
WP512_DIGEST_SIZE 64 #define WP384_DIGEST_SIZE 48 @@ -37,9 +37,6 @@ struct wp512_ctx { u8 bitLength[WP512_LENGTHBYTES]; - u8 buffer[WP512_BLOCK_SIZE]; - int bufferBits; - int bufferPos; u64 hash[WP512_DIGEST_SIZE/8]; }; @@ -779,16 +776,16 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = { * The core Whirlpool transform. */ -static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx) { +static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx, + const u8 *buffer) { int i, r; u64 K[8]; /* the round key */ u64 block[8]; /* mu(buffer) */ u64 state[8]; /* the cipher state */ u64 L[8]; - const __be64 *buffer = (const __be64 *)wctx->buffer; for (i = 0; i < 8; i++) - block[i] = be64_to_cpu(buffer[i]); + block[i] = get_unaligned_be64(buffer + i * 8); state[0] = block[0] ^ (K[0] = wctx->hash[0]); state[1] = block[1] ^ (K[1] = wctx->hash[1]); @@ -991,8 +988,6 @@ static int wp512_init(struct shash_desc *desc) { int i; memset(wctx->bitLength, 0, 32); - wctx->bufferBits = wctx->bufferPos = 0; - wctx->buffer[0] = 0; for (i = 0; i < 8; i++) { wctx->hash[i] = 0L; } @@ -1000,84 +995,54 @@ static int wp512_init(struct shash_desc *desc) { return 0; } -static int wp512_update(struct shash_desc *desc, const u8 *source, - unsigned int len) +static void wp512_add_length(u8 *bitLength, u64 value) { - struct wp512_ctx *wctx = shash_desc_ctx(desc); - int sourcePos = 0; - unsigned int bits_len = len * 8; // convert to number of bits - int sourceGap = (8 - ((int)bits_len & 7)) & 7; - int bufferRem = wctx->bufferBits & 7; + u32 carry; int i; - u32 b, carry; - u8 *buffer = wctx->buffer; - u8 *bitLength = wctx->bitLength; - int bufferBits = wctx->bufferBits; - int bufferPos = wctx->bufferPos; - u64 value = bits_len; for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) { carry += bitLength[i] + ((u32)value & 0xff); bitLength[i] = (u8)carry; carry >>= 8; value >>= 8; } - while (bits_len > 8) { - b = ((source[sourcePos] << sourceGap) & 0xff) | - ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap)); - buffer[bufferPos++] |= (u8)(b >> bufferRem); - bufferBits += 8 - bufferRem; - if (bufferBits == WP512_BLOCK_SIZE * 8) { - wp512_process_buffer(wctx); - bufferBits = bufferPos = 0; - } - buffer[bufferPos] = b << (8 - bufferRem); - bufferBits += bufferRem; - bits_len -= 8; - sourcePos++; - } - if (bits_len > 0) { - b = (source[sourcePos] << sourceGap) & 0xff; - buffer[bufferPos] |= b >> bufferRem; - } else { - b = 0; - } - if (bufferRem + bits_len < 8) { - bufferBits += bits_len; - } else { - bufferPos++; - bufferBits += 8 - bufferRem; - bits_len -= 8 - bufferRem; - if (bufferBits == WP512_BLOCK_SIZE * 8) { - wp512_process_buffer(wctx); - bufferBits = bufferPos = 0; - } - buffer[bufferPos] = b << (8 - bufferRem); - bufferBits += (int)bits_len; - } +} - wctx->bufferBits = bufferBits; - wctx->bufferPos = bufferPos; +static int wp512_update(struct shash_desc *desc, const u8 *source, + unsigned int len) +{ + struct wp512_ctx *wctx = shash_desc_ctx(desc); + unsigned int remain = len % WP512_BLOCK_SIZE; + u64 bits_len = (len - remain) * 8ull; + u8 *bitLength = wctx->bitLength; - return 0; + wp512_add_length(bitLength, bits_len); + do { + wp512_process_buffer(wctx, source); + source += WP512_BLOCK_SIZE; + bits_len -= WP512_BLOCK_SIZE * 8; + } while (bits_len); + + return remain; } -static int wp512_final(struct shash_desc *desc, u8 *out) +static int wp512_finup(struct shash_desc *desc, const u8 *src, + unsigned int bufferPos, u8 *out) { struct wp512_ctx *wctx = shash_desc_ctx(desc); int i; - u8 
*buffer = wctx->buffer; u8 *bitLength = wctx->bitLength; - int bufferBits = wctx->bufferBits; - int bufferPos = wctx->bufferPos; __be64 *digest = (__be64 *)out; + u8 buffer[WP512_BLOCK_SIZE]; - buffer[bufferPos] |= 0x80U >> (bufferBits & 7); + wp512_add_length(bitLength, bufferPos * 8); + memcpy(buffer, src, bufferPos); + buffer[bufferPos] = 0x80U; bufferPos++; if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) { if (bufferPos < WP512_BLOCK_SIZE) memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos); - wp512_process_buffer(wctx); + wp512_process_buffer(wctx, buffer); bufferPos = 0; } if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES) @@ -1086,31 +1051,32 @@ static int wp512_final(struct shash_desc *desc, u8 *out) bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES; memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], bitLength, WP512_LENGTHBYTES); - wp512_process_buffer(wctx); + wp512_process_buffer(wctx, buffer); + memzero_explicit(buffer, sizeof(buffer)); for (i = 0; i < WP512_DIGEST_SIZE/8; i++) digest[i] = cpu_to_be64(wctx->hash[i]); - wctx->bufferBits = bufferBits; - wctx->bufferPos = bufferPos; return 0; } -static int wp384_final(struct shash_desc *desc, u8 *out) +static int wp384_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { u8 D[64]; - wp512_final(desc, D); + wp512_finup(desc, src, len, D); memcpy(out, D, WP384_DIGEST_SIZE); memzero_explicit(D, WP512_DIGEST_SIZE); return 0; } -static int wp256_final(struct shash_desc *desc, u8 *out) +static int wp256_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { u8 D[64]; - wp512_final(desc, D); + wp512_finup(desc, src, len, D); memcpy(out, D, WP256_DIGEST_SIZE); memzero_explicit(D, WP512_DIGEST_SIZE); @@ -1121,11 +1087,12 @@ static struct shash_alg wp_algs[3] = { { .digestsize = WP512_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, - .final = wp512_final, + .finup = wp512_finup, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp512", .cra_driver_name = "wp512-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1133,11 +1100,12 @@ static struct shash_alg wp_algs[3] = { { .digestsize = WP384_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, - .final = wp384_final, + .finup = wp384_finup, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp384", .cra_driver_name = "wp384-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1145,11 +1113,12 @@ static struct shash_alg wp_algs[3] = { { .digestsize = WP256_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, - .final = wp256_final, + .finup = wp256_finup, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp256", .cra_driver_name = "wp256-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1169,7 +1138,7 @@ MODULE_ALIAS_CRYPTO("wp512"); MODULE_ALIAS_CRYPTO("wp384"); MODULE_ALIAS_CRYPTO("wp256"); -subsys_initcall(wp512_mod_init); +module_init(wp512_mod_init); module_exit(wp512_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index a9e8ee9c1949..6c5f6766fdd6 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -8,9 +8,12 @@ #include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> +#include <crypto/utils.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> static 
u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, @@ -30,22 +33,6 @@ struct xcbc_tfm_ctx { u8 consts[]; }; -/* - * +------------------------ - * | <shash desc> - * +------------------------ - * | xcbc_desc_ctx - * +------------------------ - * | odds (block size) - * +------------------------ - * | prev (block size) - * +------------------------ - */ -struct xcbc_desc_ctx { - unsigned int len; - u8 odds[]; -}; - #define XCBC_BLOCKSIZE 16 static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, @@ -70,13 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = &ctx->odds[bs]; + u8 *prev = shash_desc_ctx(pdesc); - ctx->len = 0; memset(prev, 0, bs); - return 0; } @@ -85,77 +69,36 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(odds + ctx->len, p, len); - ctx->len += len; - return 0; - } - - /* filling odds with new data and encrypting it */ - memcpy(odds + ctx->len, p, bs - ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - crypto_xor(prev, odds, bs); - crypto_cipher_encrypt_one(tfm, prev, prev); + u8 *prev = shash_desc_ctx(pdesc); - /* clearing the length */ - ctx->len = 0; - - /* encrypting the rest of data */ - while (len > bs) { + do { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(odds, p, len); - ctx->len = len; - } - - return 0; + } while (len >= bs); + return len; } -static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; + u8 *prev = shash_desc_ctx(pdesc); unsigned int offset = 0; - if (ctx->len != bs) { - unsigned int rlen; - u8 *p = odds + ctx->len; - - *p = 0x80; - p++; - - rlen = bs - ctx->len -1; - if (rlen) - memset(p, 0, rlen); - + crypto_xor(prev, src, len); + if (len != bs) { + prev[len] ^= 0x80; offset += bs; } - - crypto_xor(prev, odds, bs); crypto_xor(prev, &tctx->consts[offset], bs); - crypto_cipher_encrypt_one(tfm, out, prev); - return 0; } @@ -216,17 +159,18 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) + alg->cra_blocksize * 2; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = sizeof(struct xcbc_desc_ctx) + - alg->cra_blocksize * 2; + inst->alg.descsize = alg->cra_blocksize; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; 
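The rewritten update/finup pair above relies on the new CRYPTO_AHASH_ALG_BLOCK_ONLY contract: update() now only ever consumes whole blocks and returns the number of leftover bytes, and finup() receives that partial tail (kept non-empty by CRYPTO_AHASH_ALG_FINAL_NONZERO). The finalization itself is the RFC 3566 rule: a full final block is XORed with K2 (consts[0]), a short one is padded with a single 0x80 byte and XORed with K3 (consts[bs]). A standalone sketch of that rule, with the one-block cipher abstracted behind a callback (all names here are illustrative):

#include <stddef.h>
#include <stdint.h>

#define BS 16	/* XCBC requires a 16-byte block cipher */

typedef void (*encrypt_block_fn)(const void *key, uint8_t dst[BS],
				 const uint8_t src[BS]);

/*
 * Finalize an XCBC MAC (RFC 3566): prev is the running CBC state,
 * src/len the 1..BS trailing bytes, k2/k3 the derived constants.
 */
static void xcbc_final_block(encrypt_block_fn enc, const void *key,
			     uint8_t prev[BS], const uint8_t *src, size_t len,
			     const uint8_t k2[BS], const uint8_t k3[BS],
			     uint8_t mac[BS])
{
	const uint8_t *k = (len == BS) ? k2 : k3;
	size_t i;

	for (i = 0; i < len; i++)
		prev[i] ^= src[i];
	if (len != BS)
		prev[len] ^= 0x80;	/* 10* padding, as in the patch */
	for (i = 0; i < BS; i++)
		prev[i] ^= k[i];	/* consts[0] or consts[bs] above */
	enc(key, mac, prev);
}

The kernel version fuses the XOR loops into crypto_xor() calls and indexes a single consts[] array by offset rather than taking K2 and K3 as separate arguments.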
inst->alg.init = crypto_xcbc_digest_init; inst->alg.update = crypto_xcbc_digest_update; - inst->alg.final = crypto_xcbc_digest_final; + inst->alg.finup = crypto_xcbc_digest_finup; inst->alg.setkey = crypto_xcbc_digest_setkey; inst->free = shash_free_singlespawn_instance; @@ -255,10 +199,10 @@ static void __exit crypto_xcbc_module_exit(void) crypto_unregister_template(&crypto_xcbc_tmpl); } -subsys_initcall(crypto_xcbc_module_init); +module_init(crypto_xcbc_module_init); module_exit(crypto_xcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XCBC keyed hash algorithm"); MODULE_ALIAS_CRYPTO("xcbc"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/xctr.c b/crypto/xctr.c index 5c00147e8ec4..607ab82cb19b 100644 --- a/crypto/xctr.c +++ b/crypto/xctr.c @@ -78,7 +78,7 @@ static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk, crypto_cipher_alg(tfm)->cia_encrypt; unsigned long alignmask = crypto_cipher_alignmask(tfm); unsigned int nbytes = walk->nbytes; - u8 *data = walk->src.virt.addr; + u8 *data = walk->dst.virt.addr; u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); @@ -182,10 +182,10 @@ static void __exit crypto_xctr_module_exit(void) crypto_unregister_template(&crypto_xctr_tmpl); } -subsys_initcall(crypto_xctr_module_init); +module_init(crypto_xctr_module_init); module_exit(crypto_xctr_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XCTR block cipher mode of operation"); MODULE_ALIAS_CRYPTO("xctr"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/xor.c b/crypto/xor.c index 8e72e5d5db0d..f39621a57bb3 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -83,33 +83,30 @@ static void __init do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) { int speed; - int i, j; - ktime_t min, start, diff; + unsigned long reps; + ktime_t min, start, t0; tmpl->next = template_list; template_list = tmpl; preempt_disable(); - min = (ktime_t)S64_MAX; - for (i = 0; i < 3; i++) { - start = ktime_get(); - for (j = 0; j < REPS; j++) { - mb(); /* prevent loop optimization */ - tmpl->do_2(BENCH_SIZE, b1, b2); - mb(); - } - diff = ktime_sub(ktime_get(), start); - if (diff < min) - min = diff; - } + reps = 0; + t0 = ktime_get(); + /* delay start until time has advanced */ + while ((start = ktime_get()) == t0) + cpu_relax(); + do { + mb(); /* prevent loop optimization */ + tmpl->do_2(BENCH_SIZE, b1, b2); + mb(); + } while (reps++ < REPS || (t0 = ktime_get()) == start); + min = ktime_sub(t0, start); preempt_enable(); // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s] - if (!min) - min = 1; - speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); + speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); tmpl->speed = speed; pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); @@ -165,6 +162,7 @@ out: static __exit void xor_exit(void) { } +MODULE_DESCRIPTION("RAID-5 checksumming functions"); MODULE_LICENSE("GPL"); #ifndef MODULE diff --git a/crypto/xts.c b/crypto/xts.c index 672e1a3f0b0c..3da8f5e053d6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, while (w.nbytes) { unsigned int avail = w.nbytes; - le128 *wsrc; + const le128 *wsrc; le128 *wdst; wsrc = w.src.virt.addr; @@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) err = 
crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(name, cipher_name + 4, sizeof(name)); @@ -466,11 +466,11 @@ static void __exit xts_module_exit(void) crypto_unregister_template(&xts_tmpl); } -subsys_initcall(xts_module_init); +module_init(xts_module_init); module_exit(xts_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); MODULE_ALIAS_CRYPTO("xts"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); MODULE_SOFTDEP("pre: ecb"); diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c index 55d1c8a76127..175bb7ae0fcd 100644 --- a/crypto/xxhash_generic.c +++ b/crypto/xxhash_generic.c @@ -4,7 +4,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/xxhash.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #define XXHASH64_BLOCK_SIZE 32 #define XXHASH64_DIGEST_SIZE 8 @@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(xxhash_mod_init); +module_init(xxhash_mod_init); module_exit(xxhash_mod_fini); MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>"); diff --git a/crypto/zstd.c b/crypto/zstd.c index 154a969c83a8..7570e11b4ee6 100644 --- a/crypto/zstd.c +++ b/crypto/zstd.c @@ -103,7 +103,7 @@ static int __zstd_init(void *ctx) return ret; } -static void *zstd_alloc_ctx(struct crypto_scomp *tfm) +static void *zstd_alloc_ctx(void) { int ret; struct zstd_ctx *ctx; @@ -121,32 +121,18 @@ static void *zstd_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int zstd_init(struct crypto_tfm *tfm) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - return __zstd_init(ctx); -} - static void __zstd_exit(void *ctx) { zstd_comp_exit(ctx); zstd_decomp_exit(ctx); } -static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void zstd_free_ctx(void *ctx) { __zstd_exit(ctx); kfree_sensitive(ctx); } -static void zstd_exit(struct crypto_tfm *tfm) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - __zstd_exit(ctx); -} - static int __zstd_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -161,14 +147,6 @@ static int __zstd_compress(const u8 *src, unsigned int slen, return 0; } -static int zstd_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - return __zstd_compress(src, slen, dst, dlen, ctx); -} - static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -189,14 +167,6 @@ static int __zstd_decompress(const u8 *src, unsigned int slen, return 0; } -static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - return __zstd_decompress(src, slen, dst, dlen, ctx); -} - static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -204,19 +174,6 @@ static int zstd_sdecompress(struct 
crypto_scomp *tfm, const u8 *src, return __zstd_decompress(src, slen, dst, dlen, ctx); } -static struct crypto_alg alg = { - .cra_name = "zstd", - .cra_driver_name = "zstd-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct zstd_ctx), - .cra_module = THIS_MODULE, - .cra_init = zstd_init, - .cra_exit = zstd_exit, - .cra_u = { .compress = { - .coa_compress = zstd_compress, - .coa_decompress = zstd_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = zstd_alloc_ctx, .free_ctx = zstd_free_ctx, @@ -231,26 +188,15 @@ static struct scomp_alg scomp = { static int __init zstd_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) - crypto_unregister_alg(&alg); - - return ret; + return crypto_register_scomp(&scomp); } static void __exit zstd_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } -subsys_initcall(zstd_mod_init); +module_init(zstd_mod_init); module_exit(zstd_mod_fini); MODULE_LICENSE("GPL");
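One more note, on the xor.c benchmarking hunk above: rather than timing a fixed REPS iterations three times and keeping the minimum, the new loop first spins until ktime_get() advances so measurement starts on a clock edge, then keeps iterating until both REPS repetitions have run and the clock has moved past the start. That way a coarse clock can no longer produce a zero or single-tick delta. A userspace analogue of the same pattern using clock_gettime() (illustrative only; the kernel version also brackets each call with mb() to keep the loop from being optimized away):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BENCH_SIZE 4096
#define REPS 800

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	static unsigned char b1[BENCH_SIZE], b2[BENCH_SIZE];
	uint64_t t0, start, end = 0;
	unsigned long reps = 0;
	size_t i;

	/* Start on a clock edge so timer granularity can't bias us. */
	t0 = now_ns();
	while ((start = now_ns()) == t0)
		;
	/* Run at least REPS times *and* until the clock has advanced. */
	do {
		for (i = 0; i < BENCH_SIZE; i++)
			b1[i] ^= b2[i];	/* stand-in for tmpl->do_2() */
	} while (reps++ < REPS || (end = now_ns()) == start);

	/* bytes/ns == GB/s; scale by 1000 for MB/s, as the kernel does. */
	printf("%llu MB/s (%u)\n",
	       (unsigned long long)(1000ull * reps * BENCH_SIZE / (end - start)),
	       b1[0]);	/* keep the XOR loop observable */
	return 0;
}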