Diffstat (limited to 'crypto')
-rw-r--r--  crypto/842.c | 82
-rw-r--r--  crypto/Kconfig | 426
-rw-r--r--  crypto/Makefile | 73
-rw-r--r--  crypto/acompress.c | 478
-rw-r--r--  crypto/adiantum.c | 89
-rw-r--r--  crypto/aead.c | 79
-rw-r--r--  crypto/aegis-neon.h | 17
-rw-r--r--  crypto/aegis128-core.c | 18
-rw-r--r--  crypto/aegis128-neon-inner.c | 1
-rw-r--r--  crypto/aegis128-neon.c | 45
-rw-r--r--  crypto/aes_generic.c | 6
-rw-r--r--  crypto/aes_ti.c | 2
-rw-r--r--  crypto/af_alg.c | 390
-rw-r--r--  crypto/ahash.c | 1016
-rw-r--r--  crypto/akcipher.c | 128
-rw-r--r--  crypto/algapi.c | 463
-rw-r--r--  crypto/algboss.c | 37
-rw-r--r--  crypto/algif_aead.c | 149
-rw-r--r--  crypto/algif_hash.c | 226
-rw-r--r--  crypto/algif_rng.c | 5
-rw-r--r--  crypto/algif_skcipher.c | 97
-rw-r--r--  crypto/ansi_cprng.c | 474
-rw-r--r--  crypto/anubis.c | 23
-rw-r--r--  crypto/api.c | 230
-rw-r--r--  crypto/arc4.c | 67
-rw-r--r--  crypto/aria_generic.c | 43
-rw-r--r--  crypto/asymmetric_keys/Kconfig | 22
-rw-r--r--  crypto/asymmetric_keys/Makefile | 5
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c | 32
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c | 16
-rw-r--r--  crypto/asymmetric_keys/pkcs7.asn1 | 7
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 22
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c | 12
-rw-r--r--  crypto/asymmetric_keys/pkcs8.asn1 | 6
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 365
-rw-r--r--  crypto/asymmetric_keys/restrict.c | 95
-rw-r--r--  crypto/asymmetric_keys/selftest.c | 228
-rw-r--r--  crypto/asymmetric_keys/selftest.h | 22
-rw-r--r--  crypto/asymmetric_keys/selftest_ecdsa.c | 88
-rw-r--r--  crypto/asymmetric_keys/selftest_rsa.c | 171
-rw-r--r--  crypto/asymmetric_keys/signature.c | 63
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c | 46
-rw-r--r--  crypto/asymmetric_keys/x509.asn1 | 7
-rw-r--r--  crypto/asymmetric_keys/x509_akid.asn1 | 29
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 152
-rw-r--r--  crypto/asymmetric_keys/x509_loader.c | 1
-rw-r--r--  crypto/asymmetric_keys/x509_parser.h | 12
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 60
-rw-r--r--  crypto/async_tx/async_pq.c | 12
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 4
-rw-r--r--  crypto/async_tx/async_tx.c | 4
-rw-r--r--  crypto/async_tx/async_xor.c | 26
-rw-r--r--  crypto/authenc.c | 139
-rw-r--r--  crypto/authencesn.c | 83
-rw-r--r--  crypto/blake2b.c | 111
-rw-r--r--  crypto/blake2b_generic.c | 187
-rw-r--r--  crypto/blowfish_common.c | 3
-rw-r--r--  crypto/blowfish_generic.c | 7
-rw-r--r--  crypto/bpf_crypto_skcipher.c | 83
-rw-r--r--  crypto/camellia_generic.c | 6
-rw-r--r--  crypto/cast5_generic.c | 6
-rw-r--r--  crypto/cast6_generic.c | 6
-rw-r--r--  crypto/cast_common.c | 1
-rw-r--r--  crypto/cbc.c | 170
-rw-r--r--  crypto/ccm.c | 87
-rw-r--r--  crypto/cfb.c | 254
-rw-r--r--  crypto/chacha.c (renamed from crypto/chacha_generic.c) | 94
-rw-r--r--  crypto/chacha20poly1305.c | 344
-rw-r--r--  crypto/cipher.c | 37
-rw-r--r--  crypto/cmac.c | 157
-rw-r--r--  crypto/compress.c | 32
-rw-r--r--  crypto/compress.h | 21
-rw-r--r--  crypto/crc32.c (renamed from crypto/crc32_generic.c) | 47
-rw-r--r--  crypto/crc32c.c (renamed from crypto/crc32c_generic.c) | 44
-rw-r--r--  crypto/crc64_rocksoft_generic.c | 89
-rw-r--r--  crypto/crct10dif_common.c | 82
-rw-r--r--  crypto/crct10dif_generic.c | 122
-rw-r--r--  crypto/cryptd.c | 377
-rw-r--r--  crypto/crypto_engine.c | 300
-rw-r--r--  crypto/crypto_null.c | 90
-rw-r--r--  crypto/crypto_user.c (renamed from crypto/crypto_user_base.c) | 26
-rw-r--r--  crypto/crypto_user_stat.c | 335
-rw-r--r--  crypto/ctr.c | 28
-rw-r--r--  crypto/cts.c | 26
-rw-r--r--  crypto/curve25519-generic.c | 90
-rw-r--r--  crypto/deflate.c | 423
-rw-r--r--  crypto/des_generic.c | 4
-rw-r--r--  crypto/df_sp80090a.c | 232
-rw-r--r--  crypto/dh.c | 68
-rw-r--r--  crypto/drbg.c | 319
-rw-r--r--  crypto/ecb.c | 194
-rw-r--r--  crypto/ecc.c | 131
-rw-r--r--  crypto/ecc_curve_defs.h | 49
-rw-r--r--  crypto/ecdh.c | 13
-rw-r--r--  crypto/ecdsa-p1363.c | 161
-rw-r--r--  crypto/ecdsa-x962.c | 238
-rw-r--r--  crypto/ecdsa.c | 275
-rw-r--r--  crypto/echainiv.c | 20
-rw-r--r--  crypto/ecrdsa.c | 67
-rw-r--r--  crypto/ecrdsa_defs.h | 5
-rw-r--r--  crypto/essiv.c | 59
-rw-r--r--  crypto/fcrypt.c | 4
-rw-r--r--  crypto/fips.c | 25
-rw-r--r--  crypto/gcm.c | 91
-rw-r--r--  crypto/geniv.c | 13
-rw-r--r--  crypto/gf128mul.c | 416
-rw-r--r--  crypto/ghash-generic.c | 58
-rw-r--r--  crypto/hash.h | 18
-rw-r--r--  crypto/hash_info.c | 57
-rw-r--r--  crypto/hctr2.c | 246
-rw-r--r--  crypto/hkdf.c | 573
-rw-r--r--  crypto/hmac.c | 456
-rw-r--r--  crypto/internal.h | 60
-rw-r--r--  crypto/jitterentropy-kcapi.c | 243
-rw-r--r--  crypto/jitterentropy-testing.c | 295
-rw-r--r--  crypto/jitterentropy.c | 514
-rw-r--r--  crypto/jitterentropy.h | 24
-rw-r--r--  crypto/kdf_sp800108.c | 10
-rw-r--r--  crypto/keywrap.c | 320
-rw-r--r--  crypto/khazad.c | 21
-rw-r--r--  crypto/kpp.c | 24
-rw-r--r--  crypto/krb5/Kconfig | 26
-rw-r--r--  crypto/krb5/Makefile | 18
-rw-r--r--  crypto/krb5/internal.h | 247
-rw-r--r--  crypto/krb5/krb5_api.c | 452
-rw-r--r--  crypto/krb5/krb5_kdf.c | 145
-rw-r--r--  crypto/krb5/rfc3961_simplified.c | 793
-rw-r--r--  crypto/krb5/rfc3962_aes.c | 115
-rw-r--r--  crypto/krb5/rfc6803_camellia.c | 237
-rw-r--r--  crypto/krb5/rfc8009_aes2.c | 362
-rw-r--r--  crypto/krb5/selftest.c | 545
-rw-r--r--  crypto/krb5/selftest_data.c | 291
-rw-r--r--  crypto/krb5enc.c | 504
-rw-r--r--  crypto/lrw.c | 30
-rw-r--r--  crypto/lskcipher.c | 593
-rw-r--r--  crypto/lz4.c | 77
-rw-r--r--  crypto/lz4hc.c | 82
-rw-r--r--  crypto/lzo-rle.c | 86
-rw-r--r--  crypto/lzo.c | 86
-rw-r--r--  crypto/md4.c | 2
-rw-r--r--  crypto/md5.c | 382
-rw-r--r--  crypto/michael_mic.c | 4
-rw-r--r--  crypto/nhpoly1305.c | 4
-rw-r--r--  crypto/ofb.c | 106
-rw-r--r--  crypto/pcbc.c | 36
-rw-r--r--  crypto/pcrypt.c | 21
-rw-r--r--  crypto/poly1305_generic.c | 149
-rw-r--r--  crypto/polyval-generic.c | 245
-rw-r--r--  crypto/proc.c | 10
-rw-r--r--  crypto/rmd160.c | 90
-rw-r--r--  crypto/rng.c | 30
-rw-r--r--  crypto/rsa-pkcs1pad.c | 390
-rw-r--r--  crypto/rsa.c | 110
-rw-r--r--  crypto/rsaprivkey.asn1 | 7
-rw-r--r--  crypto/rsapubkey.asn1 | 7
-rw-r--r--  crypto/rsassa-pkcs1.c | 437
-rw-r--r--  crypto/scatterwalk.c | 179
-rw-r--r--  crypto/scompress.c | 303
-rw-r--r--  crypto/seed.c | 52
-rw-r--r--  crypto/seqiv.c | 26
-rw-r--r--  crypto/serpent_generic.c | 6
-rw-r--r--  crypto/sha1.c | 240
-rw-r--r--  crypto/sha1_generic.c | 96
-rw-r--r--  crypto/sha256.c | 419
-rw-r--r--  crypto/sha256_generic.c | 110
-rw-r--r--  crypto/sha3.c | 166
-rw-r--r--  crypto/sha3_generic.c | 305
-rw-r--r--  crypto/sha512.c | 425
-rw-r--r--  crypto/sha512_generic.c | 227
-rw-r--r--  crypto/shash.c | 605
-rw-r--r--  crypto/sig.c | 182
-rw-r--r--  crypto/simd.c | 77
-rw-r--r--  crypto/skcipher.c | 651
-rw-r--r--  crypto/skcipher.h | 18
-rw-r--r--  crypto/sm2.c | 460
-rw-r--r--  crypto/sm2signature.asn1 | 4
-rw-r--r--  crypto/sm3.c | 246
-rw-r--r--  crypto/sm3_generic.c | 33
-rw-r--r--  crypto/sm4.c | 2
-rw-r--r--  crypto/sm4_generic.c | 6
-rw-r--r--  crypto/streebog_generic.c | 73
-rw-r--r--  crypto/tcrypt.c | 434
-rw-r--r--  crypto/tcrypt.h | 24
-rw-r--r--  crypto/tea.c | 87
-rw-r--r--  crypto/testmgr.c | 1461
-rw-r--r--  crypto/testmgr.h | 6073
-rw-r--r--  crypto/twofish_common.c | 2
-rw-r--r--  crypto/twofish_generic.c | 6
-rw-r--r--  crypto/vmac.c | 697
-rw-r--r--  crypto/wp512.c | 123
-rw-r--r--  crypto/xcbc.c | 120
-rw-r--r--  crypto/xctr.c | 6
-rw-r--r--  crypto/xor.c | 32
-rw-r--r--  crypto/xts.c | 67
-rw-r--r--  crypto/xxhash_generic.c | 4
-rw-r--r--  crypto/zstd.c | 397
196 files changed, 19185 insertions, 17359 deletions
diff --git a/crypto/842.c b/crypto/842.c
index e59e54d76960..4007e87bed80 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -18,17 +18,12 @@
* drivers/crypto/nx/nx-842-crypto.c
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/sw842.h>
-#include <crypto/internal/scompress.h>
-
-struct crypto842_ctx {
- void *wmem; /* working memory for compress */
-};
-static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+static void *crypto842_alloc_ctx(void)
{
void *ctx;
@@ -39,38 +34,11 @@ static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int crypto842_init(struct crypto_tfm *tfm)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->wmem = crypto842_alloc_ctx(NULL);
- if (IS_ERR(ctx->wmem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void crypto842_free_ctx(void *ctx)
{
kfree(ctx);
}
-static void crypto842_exit(struct crypto_tfm *tfm)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto842_free_ctx(NULL, ctx->wmem);
-}
-
-static int crypto842_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return sw842_compress(src, slen, dst, dlen, ctx->wmem);
-}
-
static int crypto842_scompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -78,13 +46,6 @@ static int crypto842_scompress(struct crypto_scomp *tfm,
return sw842_compress(src, slen, dst, dlen, ctx);
}
-static int crypto842_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return sw842_decompress(src, slen, dst, dlen);
-}
-
static int crypto842_sdecompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -92,23 +53,11 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm,
return sw842_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "842",
- .cra_driver_name = "842-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct crypto842_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crypto842_init,
- .cra_exit = crypto842_exit,
- .cra_u = { .compress = {
- .coa_compress = crypto842_compress,
- .coa_decompress = crypto842_decompress } }
-};
-
static struct scomp_alg scomp = {
- .alloc_ctx = crypto842_alloc_ctx,
- .free_ctx = crypto842_free_ctx,
+ .streams = {
+ .alloc_ctx = crypto842_alloc_ctx,
+ .free_ctx = crypto842_free_ctx,
+ },
.compress = crypto842_scompress,
.decompress = crypto842_sdecompress,
.base = {
@@ -121,25 +70,12 @@ static struct scomp_alg scomp = {
static int __init crypto842_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
-subsys_initcall(crypto842_mod_init);
+module_init(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
module_exit(crypto842_mod_exit);
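
For reference, the registration shape this patch converts 842 to looks like the following minimal sketch for a hypothetical driver: the legacy crypto_alg compression interface is gone, and per-stream working memory hangs off the new .streams alloc_ctx/free_ctx hooks. The example_* names and the 4096-byte buffer are placeholders, not taken from this patch; the callback signatures mirror the 842 code above.

#include <crypto/internal/scompress.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Placeholder per-stream state; a real driver sizes this for its algorithm
 * (sw842 above uses SW842_MEM_COMPRESS-sized working memory). */
static void *example_alloc_ctx(void)
{
	void *ctx = kmalloc(4096, GFP_KERNEL);

	return ctx ? ctx : ERR_PTR(-ENOMEM);
}

static void example_free_ctx(void *ctx)
{
	kfree(ctx);
}

static int example_scompress(struct crypto_scomp *tfm, const u8 *src,
			     unsigned int slen, u8 *dst, unsigned int *dlen,
			     void *ctx)
{
	return -EOPNOTSUPP;	/* the actual (de)compressor goes here */
}

static struct scomp_alg example_scomp = {
	.streams = {
		.alloc_ctx	= example_alloc_ctx,
		.free_ctx	= example_free_ctx,
	},
	.compress	= example_scompress,
	.decompress	= example_scompress,
	.base = {
		.cra_name	 = "example",
		.cra_driver_name = "example-generic",
		.cra_module	 = THIS_MODULE,
	},
};

Registration is then a single crypto_register_scomp(&example_scomp) from module_init, exactly as the simplified crypto842_mod_init() above now does.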
diff --git a/crypto/Kconfig b/crypto/Kconfig
index d779667671b2..2e5b195b1b06 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+ depends on CRYPTO_DRBG && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -71,18 +71,25 @@ config CRYPTO_AEAD
config CRYPTO_AEAD2
tristate
select CRYPTO_ALGAPI2
- select CRYPTO_NULL2
- select CRYPTO_RNG2
+
+config CRYPTO_SIG
+ tristate
+ select CRYPTO_SIG2
+ select CRYPTO_ALGAPI
+
+config CRYPTO_SIG2
+ tristate
+ select CRYPTO_ALGAPI2
config CRYPTO_SKCIPHER
tristate
select CRYPTO_SKCIPHER2
select CRYPTO_ALGAPI
+ select CRYPTO_ECB
config CRYPTO_SKCIPHER2
tristate
select CRYPTO_ALGAPI2
- select CRYPTO_RNG2
config CRYPTO_HASH
tristate
@@ -134,21 +141,30 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
+config CRYPTO_HKDF
+ tristate
+ select CRYPTO_SHA256 if CRYPTO_SELFTESTS
+ select CRYPTO_SHA512 if CRYPTO_SELFTESTS
+ select CRYPTO_HASH2
+
config CRYPTO_MANAGER
- tristate "Cryptographic algorithm manager"
+ tristate
+ default CRYPTO_ALGAPI if CRYPTO_SELFTESTS
select CRYPTO_MANAGER2
help
- Create default cryptographic template instantiations such as
- cbc(aes).
+ This provides the support for instantiating templates such as
+ cbc(aes), and the support for the crypto self-tests.
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
+ select CRYPTO_ACOMP2
select CRYPTO_AEAD2
- select CRYPTO_HASH2
- select CRYPTO_SKCIPHER2
select CRYPTO_AKCIPHER2
+ select CRYPTO_SIG2
+ select CRYPTO_HASH2
select CRYPTO_KPP2
- select CRYPTO_ACOMP2
+ select CRYPTO_RNG2
+ select CRYPTO_SKCIPHER2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@@ -158,38 +174,44 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
-config CRYPTO_MANAGER_DISABLE_TESTS
- bool "Disable run-time self tests"
- default y
+config CRYPTO_SELFTESTS
+ bool "Enable cryptographic self-tests"
+ depends on EXPERT
help
- Disable run-time self tests that normally take place at
- algorithm registration.
+ Enable the cryptographic self-tests.
+
+ The cryptographic self-tests run at boot time, or at algorithm
+ registration time if algorithms are dynamically loaded later.
+
+ There are two main use cases for these tests:
+
+ - Development and pre-release testing. In this case, also enable
+ CRYPTO_SELFTESTS_FULL to get the full set of tests. All crypto code
+ in the kernel is expected to pass the full set of tests.
-config CRYPTO_MANAGER_EXTRA_TESTS
- bool "Enable extra run-time crypto self tests"
- depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
+ - Production kernels, to help prevent buggy drivers from being used
+ and/or meet FIPS 140-3 pre-operational testing requirements. In
+ this case, enable CRYPTO_SELFTESTS but not CRYPTO_SELFTESTS_FULL.
+
+config CRYPTO_SELFTESTS_FULL
+ bool "Enable the full set of cryptographic self-tests"
+ depends on CRYPTO_SELFTESTS
help
- Enable extra run-time self tests of registered crypto algorithms,
- including randomized fuzz tests.
+ Enable the full set of cryptographic self-tests for each algorithm.
- This is intended for developer use only, as these tests take much
- longer to run than the normal self tests.
+ The full set of tests should be enabled for development and
+ pre-release testing, but not in production kernels.
-config CRYPTO_GF128MUL
- tristate
+ All crypto code in the kernel is expected to pass the full tests.
config CRYPTO_NULL
tristate "Null algorithms"
- select CRYPTO_NULL2
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
help
These are 'Null' algorithms, used by IPsec, which do nothing.
-config CRYPTO_NULL2
- tristate
- select CRYPTO_ALGAPI2
- select CRYPTO_SKCIPHER2
- select CRYPTO_HASH2
-
config CRYPTO_PCRYPT
tristate "Parallel crypto engine"
depends on SMP
@@ -216,18 +238,32 @@ config CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
- select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
This is required for IPSec ESP (XFRM_ESP).
-config CRYPTO_TEST
- tristate "Testing module"
+config CRYPTO_KRB5ENC
+ tristate "Kerberos 5 combined hash+cipher support"
+ select CRYPTO_AEAD
+ select CRYPTO_SKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_HASH
+ help
+ Combined hash and cipher support for Kerberos 5 RFC3961 simplified
+ profile. This is required for Kerberos 5-style encryption, used by
+ sunrpc/NFS and rxrpc/AFS.
+
+config CRYPTO_BENCHMARK
+ tristate "Crypto benchmarking module"
depends on m || EXPERT
select CRYPTO_MANAGER
help
- Quick & dirty crypto test module.
+ Quick & dirty crypto benchmarking module.
+
+ This is mainly intended for use by people developing cryptographic
+ algorithms in the kernel. It should not be enabled in production
+ kernels.
config CRYPTO_SIMD
tristate
@@ -244,6 +280,7 @@ config CRYPTO_RSA
tristate "RSA (Rivest-Shamir-Adleman)"
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
+ select CRYPTO_SIG
select MPILIB
select ASN1
help
@@ -284,19 +321,19 @@ config CRYPTO_ECDH
config CRYPTO_ECDSA
tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)"
select CRYPTO_ECC
- select CRYPTO_AKCIPHER
+ select CRYPTO_SIG
select ASN1
help
ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186,
ISO/IEC 14888-3)
- using curves P-192, P-256, and P-384
+ using curves P-192, P-256, P-384 and P-521
Only signature verification is implemented.
config CRYPTO_ECRDSA
tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)"
select CRYPTO_ECC
- select CRYPTO_AKCIPHER
+ select CRYPTO_SIG
select CRYPTO_STREEBOG
select OID_REGISTRY
select ASN1
@@ -307,31 +344,6 @@ config CRYPTO_ECRDSA
One of the Russian cryptographic standard algorithms (called GOST
algorithms). Only signature verification is implemented.
-config CRYPTO_SM2
- tristate "SM2 (ShangMi 2)"
- select CRYPTO_SM3
- select CRYPTO_AKCIPHER
- select CRYPTO_MANAGER
- select MPILIB
- select ASN1
- help
- SM2 (ShangMi 2) public key algorithm
-
- Published by State Encryption Management Bureau, China,
- as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
-
- References:
- https://datatracker.ietf.org/doc/draft-shen-sm2-ecdsa/
- http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml
- http://www.gmbz.org.cn/main/bzlb.html
-
-config CRYPTO_CURVE25519
- tristate "Curve25519"
- select CRYPTO_KPP
- select CRYPTO_LIB_CURVE25519_GENERIC
- help
- Curve25519 elliptic curve (RFC7748)
-
endmenu
menu "Block ciphers"
@@ -589,6 +601,7 @@ menu "Length-preserving ciphers and modes"
config CRYPTO_ADIANTUM
tristate "Adiantum"
select CRYPTO_CHACHA20
+ select CRYPTO_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC
select CRYPTO_NHPOLY1305
select CRYPTO_MANAGER
@@ -626,7 +639,7 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
- select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_LIB_CHACHA
select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
@@ -655,15 +668,6 @@ config CRYPTO_CBC
This block cipher mode is required for IPSec ESP (XFRM_ESP).
-config CRYPTO_CFB
- tristate "CFB (Cipher Feedback)"
- select CRYPTO_SKCIPHER
- select CRYPTO_MANAGER
- help
- CFB (Cipher Feedback) mode (NIST SP800-38A)
-
- This block cipher mode is required for TPM2 Cryptography.
-
config CRYPTO_CTR
tristate "CTR (Counter)"
select CRYPTO_SKCIPHER
@@ -684,7 +688,7 @@ config CRYPTO_CTS
config CRYPTO_ECB
tristate "ECB (Electronic Codebook)"
- select CRYPTO_SKCIPHER
+ select CRYPTO_SKCIPHER2
select CRYPTO_MANAGER
help
ECB (Electronic Codebook) mode (NIST SP800-38A)
@@ -692,7 +696,7 @@ config CRYPTO_ECB
config CRYPTO_HCTR2
tristate "HCTR2"
select CRYPTO_XCTR
- select CRYPTO_POLYVAL
+ select CRYPTO_LIB_POLYVAL
select CRYPTO_MANAGER
help
HCTR2 length-preserving encryption mode
@@ -704,19 +708,11 @@ config CRYPTO_HCTR2
See https://eprint.iacr.org/2021/1441
-config CRYPTO_KEYWRAP
- tristate "KW (AES Key Wrap)"
- select CRYPTO_SKCIPHER
- select CRYPTO_MANAGER
- help
- KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F
- and RFC3394) without padding.
-
config CRYPTO_LRW
tristate "LRW (Liskov Rivest Wagner)"
+ select CRYPTO_LIB_GF128MUL
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
- select CRYPTO_GF128MUL
select CRYPTO_ECB
help
LRW (Liskov Rivest Wagner) mode
@@ -729,20 +725,6 @@ config CRYPTO_LRW
See https://people.csail.mit.edu/rivest/pubs/LRW02.pdf
-config CRYPTO_OFB
- tristate "OFB (Output Feedback)"
- select CRYPTO_SKCIPHER
- select CRYPTO_MANAGER
- help
- OFB (Output Feedback) mode (NIST SP800-38A)
-
- This mode makes a block cipher into a synchronous
- stream cipher. It generates keystream blocks, which are then XORed
- with the plaintext blocks to get the ciphertext. Flipping a bit in the
- ciphertext produces a flipped bit in the plaintext at the same
- location. This property allows many error correcting codes to function
- normally even when applied before encryption.
-
config CRYPTO_PCBC
tristate "PCBC (Propagating Cipher Block Chaining)"
select CRYPTO_SKCIPHER
@@ -780,6 +762,7 @@ config CRYPTO_XTS
config CRYPTO_NHPOLY1305
tristate
select CRYPTO_HASH
+ select CRYPTO_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC
endmenu
@@ -806,8 +789,8 @@ config CRYPTO_AEGIS128_SIMD
config CRYPTO_CHACHA20POLY1305
tristate "ChaCha20-Poly1305"
select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
select CRYPTO_AEAD
+ select CRYPTO_LIB_POLY1305
select CRYPTO_MANAGER
help
ChaCha20 stream cipher and Poly1305 authenticator combined
@@ -828,7 +811,6 @@ config CRYPTO_GCM
select CRYPTO_CTR
select CRYPTO_AEAD
select CRYPTO_GHASH
- select CRYPTO_NULL
select CRYPTO_MANAGER
help
GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
@@ -836,13 +818,15 @@ config CRYPTO_GCM
This is required for IPSec ESP (XFRM_ESP).
-config CRYPTO_SEQIV
- tristate "Sequence Number IV Generator"
+config CRYPTO_GENIV
+ tristate
select CRYPTO_AEAD
- select CRYPTO_SKCIPHER
- select CRYPTO_NULL
- select CRYPTO_RNG_DEFAULT
select CRYPTO_MANAGER
+ select CRYPTO_RNG_DEFAULT
+
+config CRYPTO_SEQIV
+ tristate "Sequence Number IV Generator"
+ select CRYPTO_GENIV
help
Sequence Number IV generator
@@ -853,10 +837,7 @@ config CRYPTO_SEQIV
config CRYPTO_ECHAINIV
tristate "Encrypted Chain IV Generator"
- select CRYPTO_AEAD
- select CRYPTO_NULL
- select CRYPTO_RNG_DEFAULT
- select CRYPTO_MANAGER
+ select CRYPTO_GENIV
help
Encrypted Chain IV generator
@@ -900,6 +881,7 @@ menu "Hashes, digests, and MACs"
config CRYPTO_BLAKE2B
tristate "BLAKE2b"
select CRYPTO_HASH
+ select CRYPTO_LIB_BLAKE2B
help
BLAKE2b cryptographic hash function (RFC 7693)
@@ -926,8 +908,8 @@ config CRYPTO_CMAC
config CRYPTO_GHASH
tristate "GHASH"
- select CRYPTO_GF128MUL
select CRYPTO_HASH
+ select CRYPTO_LIB_GF128MUL
help
GCM GHASH function (NIST SP800-38D)
@@ -950,8 +932,9 @@ config CRYPTO_MD4
config CRYPTO_MD5
tristate "MD5"
select CRYPTO_HASH
+ select CRYPTO_LIB_MD5
help
- MD5 message digest algorithm (RFC1321)
+ MD5 message digest algorithm (RFC1321), including HMAC support.
config CRYPTO_MICHAEL_MIC
tristate "Michael MIC"
@@ -965,27 +948,6 @@ config CRYPTO_MICHAEL_MIC
This algorithm is required for TKIP, but it should not be used for
other purposes because of the weakness of the algorithm.
-config CRYPTO_POLYVAL
- tristate
- select CRYPTO_GF128MUL
- select CRYPTO_HASH
- help
- POLYVAL hash function for HCTR2
-
- This is used in HCTR2. It is not a general-purpose
- cryptographic hash function.
-
-config CRYPTO_POLY1305
- tristate "Poly1305"
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
- It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
- in IETF protocols. This is the portable C implementation of Poly1305.
-
config CRYPTO_RMD160
tristate "RIPEMD-160"
select CRYPTO_HASH
@@ -1009,14 +971,16 @@ config CRYPTO_SHA1
select CRYPTO_HASH
select CRYPTO_LIB_SHA1
help
- SHA-1 secure hash algorithm (FIPS 180, ISO/IEC 10118-3)
+ SHA-1 secure hash algorithm (FIPS 180, ISO/IEC 10118-3), including
+ HMAC support.
config CRYPTO_SHA256
tristate "SHA-224 and SHA-256"
select CRYPTO_HASH
select CRYPTO_LIB_SHA256
help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC
+ 10118-3), including HMAC support.
This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP).
Used by the btrfs filesystem, Ceph, NFS, and SMB.
@@ -1024,22 +988,22 @@ config CRYPTO_SHA256
config CRYPTO_SHA512
tristate "SHA-384 and SHA-512"
select CRYPTO_HASH
+ select CRYPTO_LIB_SHA512
help
- SHA-384 and SHA-512 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180, ISO/IEC
+ 10118-3), including HMAC support.
config CRYPTO_SHA3
tristate "SHA-3"
select CRYPTO_HASH
+ select CRYPTO_LIB_SHA3
help
SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
-config CRYPTO_SM3
- tristate
-
config CRYPTO_SM3_GENERIC
tristate "SM3 (ShangMi 3)"
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3)
@@ -1063,16 +1027,6 @@ config CRYPTO_STREEBOG
https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
https://tools.ietf.org/html/rfc6986
-config CRYPTO_VMAC
- tristate "VMAC"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- VMAC is a message authentication algorithm designed for
- very high speed on 64-bit architectures.
-
- See https://fastcrypto.org/vmac for further information.
-
config CRYPTO_WP512
tristate "Whirlpool"
select CRYPTO_HASH
@@ -1133,25 +1087,6 @@ config CRYPTO_CRC32
Used by RoCEv2 and f2fs.
-config CRYPTO_CRCT10DIF
- tristate "CRCT10DIF"
- select CRYPTO_HASH
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- CRC algorithm used by the SCSI Block Commands standard.
-
-config CRYPTO_CRC64_ROCKSOFT
- tristate "CRC64 based on Rocksoft Model algorithm"
- depends on CRC64
- select CRYPTO_HASH
- help
- CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm
-
- Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY)
-
- See https://zlib.net/crc_v3.txt
-
endmenu
menu "Compression"
@@ -1226,17 +1161,6 @@ endmenu
menu "Random number generation"
-config CRYPTO_ANSI_CPRNG
- tristate "ANSI PRNG (Pseudo Random Number Generator)"
- select CRYPTO_AES
- select CRYPTO_RNG
- help
- Pseudo RNG (random number generator) (ANSI X9.31 Appendix A.2.4)
-
- This uses the AES cipher algorithm.
-
- Note that this option must be enabled if CRYPTO_FIPS is selected
-
menuconfig CRYPTO_DRBG_MENU
tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)"
help
@@ -1262,8 +1186,7 @@ config CRYPTO_DRBG_HASH
config CRYPTO_DRBG_CTR
bool "CTR_DRBG"
- select CRYPTO_AES
- select CRYPTO_CTR
+ select CRYPTO_DF80090A
help
CTR_DRBG variant as defined in NIST SP800-90A.
@@ -1280,21 +1203,130 @@ endif # if CRYPTO_DRBG_MENU
config CRYPTO_JITTERENTROPY
tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)"
select CRYPTO_RNG
+ select CRYPTO_SHA3
help
CPU Jitter RNG (Random Number Generator) from the Jitterentropy library
A non-physical non-deterministic ("true") RNG (e.g., an entropy source
compliant with NIST SP800-90B) intended to provide a seed to a
- deterministic RNG (e.g. per NIST SP800-90C).
+ deterministic RNG (e.g., per NIST SP800-90C).
This RNG does not perform any cryptographic whitening of the generated
+ random numbers.
+
+ See https://www.chronox.de/jent/
+
+if CRYPTO_JITTERENTROPY
+if CRYPTO_FIPS && EXPERT
+
+choice
+ prompt "CPU Jitter RNG Memory Size"
+ default CRYPTO_JITTERENTROPY_MEMSIZE_2
+ help
+ The Jitter RNG measures the execution time of memory accesses.
+ Multiple consecutive memory accesses are performed. If the memory
+ size fits into a cache (e.g. L1), only the memory access timing
+ to that cache is measured. The closer the cache is to the CPU
+ the less variations are measured and thus the less entropy is
+ obtained. Thus, if the memory size fits into the L1 cache, the
+ obtained entropy is less than if the memory size fits within
+ L1 + L2, which in turn is less if the memory fits into
+ L1 + L2 + L3. Thus, by selecting a different memory size,
+ the entropy rate produced by the Jitter RNG can be modified.
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_2
+ bool "2048 Bytes (default)"
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_128
+ bool "128 kBytes"
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_1024
+ bool "1024 kBytes"
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_8192
+ bool "8192 kBytes"
+endchoice
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS
+ int
+ default 64 if CRYPTO_JITTERENTROPY_MEMSIZE_2
+ default 512 if CRYPTO_JITTERENTROPY_MEMSIZE_128
+ default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024
+ default 4096 if CRYPTO_JITTERENTROPY_MEMSIZE_8192
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
+ int
+ default 32 if CRYPTO_JITTERENTROPY_MEMSIZE_2
+ default 256 if CRYPTO_JITTERENTROPY_MEMSIZE_128
+ default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024
+ default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192
+
+config CRYPTO_JITTERENTROPY_OSR
+ int "CPU Jitter RNG Oversampling Rate"
+ range 1 15
+ default 3
+ help
+ The Jitter RNG allows the specification of an oversampling rate (OSR).
+ The Jitter RNG operation requires a fixed amount of timing
+ measurements to produce one output block of random numbers. The
+ OSR value is multiplied with the amount of timing measurements to
+ generate one output block. Thus, the timing measurement is oversampled
+ by the OSR factor. The oversampling allows the Jitter RNG to operate
+ on hardware whose timers deliver limited amount of entropy (e.g.
+ the timer is coarse) by setting the OSR to a higher value. The
+ trade-off, however, is that the Jitter RNG now requires more time
+ to generate random numbers.
+
+config CRYPTO_JITTERENTROPY_TESTINTERFACE
+ bool "CPU Jitter RNG Test Interface"
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned high resolution time stamp noise that
+ is collected by the Jitter RNG for statistical analysis. As
+ this data is used at the same time to generate random bits,
+ the Jitter RNG operates in an insecure mode as long as the
+ recording is enabled. This interface therefore is only
+ intended for testing purposes and is not suitable for
+ production systems.
+
+ The raw noise data can be obtained using the jent_raw_hires
+ debugfs file. Using the option
+ jitterentropy_testing.boot_raw_hires_test=1 the raw noise of
+ the first 1000 entropy events since boot can be sampled.
+
+ If unsure, select N.
+
+endif # if CRYPTO_FIPS && EXPERT
+
+if !(CRYPTO_FIPS && EXPERT)
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS
+ int
+ default 64
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
+ int
+ default 32
+
+config CRYPTO_JITTERENTROPY_OSR
+ int
+ default 1
+
+config CRYPTO_JITTERENTROPY_TESTINTERFACE
+ bool
- See https://www.chronox.de/jent.html
+endif # if !(CRYPTO_FIPS && EXPERT)
+endif # if CRYPTO_JITTERENTROPY
config CRYPTO_KDF800108_CTR
tristate
select CRYPTO_HMAC
select CRYPTO_SHA256
+config CRYPTO_DF80090A
+ tristate
+ select CRYPTO_AES
+ select CRYPTO_CTR
+
endmenu
menu "Userspace interface"
@@ -1352,7 +1384,6 @@ config CRYPTO_USER_API_AEAD
depends on NET
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
- select CRYPTO_NULL
select CRYPTO_USER_API
help
Enable the userspace interface for AEAD cipher algorithms.
@@ -1369,28 +1400,8 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE
already been phased out from internal use by the kernel, and are
only useful for userspace clients that still rely on them.
-config CRYPTO_STATS
- bool "Crypto usage statistics"
- depends on CRYPTO_USER
- help
- Enable the gathering of crypto stats.
-
- This collects data sizes, numbers of requests, and numbers
- of errors processed by:
- - AEAD ciphers (encrypt, decrypt)
- - asymmetric key ciphers (encrypt, decrypt, verify, sign)
- - symmetric key ciphers (encrypt, decrypt)
- - compression algorithms (compress, decompress)
- - hash algorithms (hash)
- - key-agreement protocol primitives (setsecret, generate
- public key, compute shared secret)
- - RNG (generate, seed)
-
endmenu
-config CRYPTO_HASH_INFO
- bool
-
if !KMSAN # avoid false positives from assembly
if ARM
source "arch/arm/crypto/Kconfig"
@@ -1398,12 +1409,18 @@ endif
if ARM64
source "arch/arm64/crypto/Kconfig"
endif
+if LOONGARCH
+source "arch/loongarch/crypto/Kconfig"
+endif
if MIPS
source "arch/mips/crypto/Kconfig"
endif
if PPC
source "arch/powerpc/crypto/Kconfig"
endif
+if RISCV
+source "arch/riscv/crypto/Kconfig"
+endif
if S390
source "arch/s390/crypto/Kconfig"
endif
@@ -1418,5 +1435,6 @@ endif
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
+source "crypto/krb5/Kconfig"
endif # if CRYPTO
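
For orientation, each CRYPTO_JITTERENTROPY_MEMSIZE_* choice above maps to a blocks/blocksize pair whose product is the advertised working-set size: 64 * 32 = 2048 bytes, 512 * 256 = 128 kBytes, 1024 * 1024 = 1024 kBytes, and 4096 * 2048 = 8192 kBytes. A sketch of how the driver presumably combines the generated symbols (assumed here, not shown in this hunk):

/* Assumed consumer of the generated Kconfig symbols; illustrative only.
 * E.g. the default choice yields 64 * 32 = 2048 bytes of measured memory. */
#define JENT_MEMORY_SIZE \
	(CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \
	 CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE)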
diff --git a/crypto/Makefile b/crypto/Makefile
index 303b21c43df0..16a35649dd91 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o
obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
@@ -14,9 +14,16 @@ crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
-obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o
+obj-$(CONFIG_CRYPTO_GENIV) += geniv.o
+
+crypto_skcipher-y += lskcipher.o
+crypto_skcipher-y += skcipher.o
+
+obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+obj-$(CONFIG_CRYPTO_SKCIPHER2) += bpf_crypto_skcipher.o
+endif
-obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
@@ -25,7 +32,9 @@ crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
+obj-$(CONFIG_CRYPTO_SIG2) += sig.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o
dh_generic-y := dh.o
dh_generic-y += dh_helper.o
@@ -40,19 +49,14 @@ rsa_generic-y += rsaprivkey.asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
+rsa_generic-y += rsassa-pkcs1.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
-$(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h
-$(obj)/sm2.o: $(obj)/sm2signature.asn1.h
-
-sm2_generic-y += sm2signature.asn1.o
-sm2_generic-y += sm2.o
-
-obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
-
$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
-$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
+$(obj)/ecdsa-x962.o: $(obj)/ecdsasignature.asn1.h
ecdsa_generic-y += ecdsa.o
+ecdsa_generic-y += ecdsa-x962.o
+ecdsa_generic-y += ecdsa-p1363.o
ecdsa_generic-y += ecdsasignature.asn1.o
obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o
@@ -64,31 +68,24 @@ cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
-crypto_user-y := crypto_user_base.o
-crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
-obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
-obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
-obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
-obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
-obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
-obj-$(CONFIG_CRYPTO_SM3) += sm3.o
+obj-$(CONFIG_CRYPTO_SHA1) += sha1.o
+obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
+obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
+obj-$(CONFIG_CRYPTO_SHA3) += sha3.o
obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
-obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
-CFLAGS_blake2b_generic.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
-obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
+obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
-obj-$(CONFIG_CRYPTO_CFB) += cfb.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_CTS) += cts.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
@@ -96,7 +93,6 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
-obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
@@ -150,41 +146,39 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
-obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o
+CFLAGS_chacha.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
-obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
-obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
+obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o
+crc32c-cryptoapi-y := crc32c.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o
+crc32-cryptoapi-y := crc32.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
-obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o
CFLAGS_jitterentropy.o = -O0
KASAN_SANITIZE_jitterentropy.o = n
UBSAN_SANITIZE_jitterentropy.o = n
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
-obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
+obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
-obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
-obj-$(CONFIG_CRYPTO_OFB) += ofb.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o
obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o
-obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
@@ -204,7 +198,6 @@ obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
-obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
crypto_simd-y := simd.o
obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
@@ -212,3 +205,7 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
# Key derivation function
#
obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o
+
+obj-$(CONFIG_CRYPTO_DF80090A) += df_sp80090a.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5/
diff --git a/crypto/acompress.c b/crypto/acompress.c
index c32c72048a1c..be28cbfd22e3 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -6,25 +6,50 @@
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <linux/errno.h>
+
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
+
+struct crypto_scomp;
+
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_DST_LINEAR = 1 << 2,
+};
static const struct crypto_type crypto_acomp_type;
-#ifdef CONFIG_NET
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static void acomp_reqchain_done(void *data, int err);
+
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct acomp_alg, calg.base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static int __maybe_unused crypto_acomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
@@ -34,12 +59,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
-#else
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -54,29 +73,54 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
- alg->exit(acomp);
+ if (alg->exit)
+ alg->exit(acomp);
+
+ if (acomp_is_async(acomp))
+ crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
+ struct crypto_acomp *fb = NULL;
+ int err;
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
+ if (acomp_is_async(acomp)) {
+ fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ err = -EINVAL;
+ if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
+ goto out_free_fb;
+
+ tfm->fb = crypto_acomp_tfm(fb);
+ }
+
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->dst_free = alg->dst_free;
- acomp->reqsize = alg->reqsize;
+ acomp->reqsize = alg->base.cra_reqsize;
- if (alg->exit)
- acomp->base.exit = crypto_acomp_exit_tfm;
+ acomp->base.exit = crypto_acomp_exit_tfm;
+
+ if (!alg->init)
+ return 0;
- if (alg->init)
- return alg->init(acomp);
+ err = alg->init(acomp);
+ if (err)
+ goto out_free_fb;
return 0;
+
+out_free_fb:
+ crypto_free_acomp(fb);
+ return err;
}
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
@@ -95,11 +139,14 @@ static const struct crypto_type crypto_acomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
+ .algsize = offsetof(struct acomp_alg, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
@@ -117,42 +164,161 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct acomp_req *req;
+ struct acomp_req_chain *state = &req->chain;
+
+ state->compl = req->base.complete;
+ state->data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = state;
+}
- req = __acomp_request_alloc(acomp);
- if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
- return crypto_acomp_scomp_alloc_ctx(req);
+static void acomp_restore_req(struct acomp_req *req)
+{
+ struct acomp_req_chain *state = req->base.data;
- return req;
+ req->base.complete = state->compl;
+ req->base.data = state->data;
}
-EXPORT_SYMBOL_GPL(acomp_request_alloc);
-void acomp_request_free(struct acomp_req *req)
+static void acomp_reqchain_virt(struct acomp_req *req)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req_chain *state = &req->chain;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+
+ if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
+ acomp_request_set_src_dma(req, state->src, slen);
+ if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
+ acomp_request_set_dst_dma(req, state->dst, dlen);
+}
- if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
- crypto_acomp_scomp_free_ctx(req);
+static void acomp_virt_to_sg(struct acomp_req *req)
+{
+ struct acomp_req_chain *state = &req->chain;
- if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
- acomp->dst_free(req->dst);
- req->dst = NULL;
+ state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT);
+
+ if (acomp_request_src_isvirt(req)) {
+ unsigned int slen = req->slen;
+ const u8 *svirt = req->svirt;
+
+ state->src = svirt;
+ sg_init_one(&state->ssg, svirt, slen);
+ acomp_request_set_src_sg(req, &state->ssg, slen);
}
- __acomp_request_free(req);
+ if (acomp_request_dst_isvirt(req)) {
+ unsigned int dlen = req->dlen;
+ u8 *dvirt = req->dvirt;
+
+ state->dst = dvirt;
+ sg_init_one(&state->dsg, dvirt, dlen);
+ acomp_request_set_dst_sg(req, &state->dsg, dlen);
+ }
}
-EXPORT_SYMBOL_GPL(acomp_request_free);
-int crypto_register_acomp(struct acomp_alg *alg)
+static int acomp_do_nondma(struct acomp_req *req, bool comp)
+{
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
+ int err;
+
+ if (comp)
+ err = crypto_acomp_compress(fbreq);
+ else
+ err = crypto_acomp_decompress(fbreq);
+
+ req->dlen = fbreq->dlen;
+ return err;
+}
+
+static int acomp_do_one_req(struct acomp_req *req, bool comp)
+{
+ if (acomp_request_isnondma(req))
+ return acomp_do_nondma(req, comp);
+
+ acomp_virt_to_sg(req);
+ return comp ? crypto_acomp_reqtfm(req)->compress(req) :
+ crypto_acomp_reqtfm(req)->decompress(req);
+}
+
+static int acomp_reqchain_finish(struct acomp_req *req, int err)
+{
+ acomp_reqchain_virt(req);
+ acomp_restore_req(req);
+ return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+ struct acomp_req *req = data;
+ crypto_completion_t compl;
+
+ compl = req->chain.compl;
+ data = req->chain.data;
+
+ if (err == -EINPROGRESS)
+ goto notify;
+
+ err = acomp_reqchain_finish(req, err);
+
+notify:
+ compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req, bool comp)
+{
+ int err;
+
+ acomp_save_req(req, acomp_reqchain_done);
+
+ err = acomp_do_one_req(req, comp);
+ if (err == -EBUSY || err == -EINPROGRESS)
+ return err;
+
+ return acomp_reqchain_finish(req, err);
+}
+
+int crypto_acomp_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->compress(req);
+ return acomp_do_req_chain(req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->decompress(req);
+ return acomp_do_req_chain(req, false);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
+
+void comp_prepare_alg(struct comp_alg_common *alg)
{
struct crypto_alg *base = &alg->base;
- base->cra_type = &crypto_acomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
+
+ base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
@@ -194,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ s->streams = NULL;
+ if (!streams)
+ return;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ struct acomp_req *nreq;
+
+ nreq = container_of(crypto_request_clone(&req->base, total, gfp),
+ struct acomp_req, base);
+ if (nreq == req)
+ return req;
+
+ if (req->src == &req->chain.ssg)
+ nreq->src = &nreq->chain.ssg;
+ if (req->dst == &req->chain.dsg)
+ nreq->dst = &nreq->chain.dsg;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 84450130cb6b..a6bca877c3c7 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -245,10 +245,9 @@ static void adiantum_hash_header(struct skcipher_request *req)
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
- struct scatterlist *sgl, le128 *digest)
+ struct scatterlist *sgl, unsigned int nents,
+ le128 *digest)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct shash_desc *hash_desc = &rctx->u.hash_desc;
@@ -256,14 +255,11 @@ static int adiantum_hash_message(struct skcipher_request *req,
unsigned int i, n;
int err;
- hash_desc->tfm = tctx->hash;
-
err = crypto_shash_init(hash_desc);
if (err)
return err;
- sg_miter_start(&miter, sgl, sg_nents(sgl),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
@@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req)
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ struct scatterlist *dst = req->dst;
+ const unsigned int dst_nents = sg_nents(dst);
le128 digest;
int err;
@@ -298,20 +296,38 @@ static int adiantum_finish(struct skcipher_request *req)
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
- err = adiantum_hash_message(req, req->dst, &digest);
- if (err)
- return err;
- le128_add(&digest, &digest, &rctx->header_hash);
- le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
- scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
- bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
+ rctx->u.hash_desc.tfm = tctx->hash;
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
+ if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
+ /* Fast path for single-page destination */
+ struct page *page = sg_page(dst);
+ void *virt = kmap_local_page(page) + dst->offset;
+
+ err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
+ (u8 *)&digest);
+ if (err) {
+ kunmap_local(virt);
+ return err;
+ }
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+ memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
+ flush_dcache_page(page);
+ kunmap_local(virt);
+ } else {
+ /* Slow path that works for any destination scatterlist */
+ err = adiantum_hash_message(req, dst, dst_nents, &digest);
+ if (err)
+ return err;
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+ scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
+ bulk_len, sizeof(le128), 1);
+ }
return 0;
}
-static void adiantum_streamcipher_done(struct crypto_async_request *areq,
- int err)
+static void adiantum_streamcipher_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err)
err = adiantum_finish(req);
@@ -325,6 +341,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ struct scatterlist *src = req->src;
+ const unsigned int src_nents = sg_nents(src);
unsigned int stream_len;
le128 digest;
int err;
@@ -340,12 +358,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
adiantum_hash_header(req);
- err = adiantum_hash_message(req, req->src, &digest);
+ rctx->u.hash_desc.tfm = tctx->hash;
+ if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
+ /* Fast path for single-page source */
+ void *virt = kmap_local_page(sg_page(src)) + src->offset;
+
+ err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
+ (u8 *)&digest);
+ memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
+ kunmap_local(virt);
+ } else {
+ /* Slow path that works for any source scatterlist */
+ err = adiantum_hash_message(req, src, src_nents, &digest);
+ scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
+ bulk_len, sizeof(le128), 0);
+ }
if (err)
return err;
- le128_add(&digest, &digest, &rctx->header_hash);
- scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
- bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
+ le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
/* If encrypting, encrypt P_M with the block cipher to get C_M */
@@ -469,7 +499,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
* Check for a supported set of inner algorithms.
* See the comment at the beginning of this file.
*/
-static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
+static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
struct crypto_alg *blockcipher_alg,
struct shash_alg *hash_alg)
{
@@ -495,7 +525,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
- struct skcipher_alg *streamcipher_alg;
+ struct skcipher_alg_common *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
struct shash_alg *hash_alg;
int err;
@@ -515,7 +545,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
- streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
+ streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
err = crypto_grab_cipher(&ictx->blockcipher_spawn,
@@ -562,8 +592,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
- inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
- hash_alg->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask;
/*
* The block cipher is only invoked once per message, so for long
* messages (e.g. sectors for disk encryption) its performance doesn't
@@ -579,8 +608,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.decrypt = adiantum_decrypt;
inst->alg.init = adiantum_init_tfm;
inst->alg.exit = adiantum_exit_tfm;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
+ inst->alg.min_keysize = streamcipher_alg->min_keysize;
+ inst->alg.max_keysize = streamcipher_alg->max_keysize;
inst->alg.ivsize = TWEAK_SIZE;
inst->free = adiantum_free_instance;
@@ -610,11 +639,11 @@ static void __exit adiantum_module_exit(void)
crypto_unregister_template(&adiantum_tmpl);
}
-subsys_initcall(adiantum_module_init);
+module_init(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/aead.c b/crypto/aead.c
index 16991095270d..08d44c5e5c33 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -8,13 +8,15 @@
*/
#include <crypto/internal/aead.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "internal.h"
@@ -35,8 +37,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kfree_sensitive(buffer);
return ret;
}
@@ -83,36 +84,25 @@ EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
- int ret;
- crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stats_aead_encrypt(cryptlen, alg, ret);
- return ret;
+ return -ENOKEY;
+
+ return crypto_aead_alg(aead)->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
- int ret;
- crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else if (req->cryptlen < crypto_aead_authsize(aead))
- ret = -EINVAL;
- else
- ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stats_aead_decrypt(cryptlen, alg, ret);
- return ret;
+ return -ENOKEY;
+
+ if (req->cryptlen < crypto_aead_authsize(aead))
+ return -EINVAL;
+
+ return crypto_aead_alg(aead)->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@@ -130,6 +120,7 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct aead_alg *alg = crypto_aead_alg(aead);
crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+ crypto_aead_set_reqsize(aead, crypto_tfm_alg_reqsize(tfm));
aead->authsize = alg->maxauthsize;
@@ -142,8 +133,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-#ifdef CONFIG_NET
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_aead_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
@@ -159,12 +150,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
-#else
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -173,8 +158,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
seq_printf(m, "type : aead\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
+ seq_printf(m, "async : %s\n",
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "ivsize : %u\n", aead->ivsize);
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
@@ -195,11 +180,14 @@ static const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_aead_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
.tfmsize = offsetof(struct crypto_aead, base),
+ .algsize = offsetof(struct aead_alg, base),
};
int crypto_grab_aead(struct crypto_aead_spawn *spawn,
@@ -217,6 +205,31 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask)
+{
+ struct crypto_aead *tfm;
+
+ /* Only sync algorithms are allowed. */
+ mask |= CRYPTO_ALG_ASYNC;
+ type &= ~(CRYPTO_ALG_ASYNC);
+
+ tfm = crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask);
+
+ if (!IS_ERR(tfm) && WARN_ON(crypto_aead_reqsize(tfm) > MAX_SYNC_AEAD_REQSIZE)) {
+ crypto_free_aead(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return (struct crypto_sync_aead *)tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sync_aead);
+
+int crypto_has_aead(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_aead);
+
static int aead_prepare_alg(struct aead_alg *alg)
{
struct crypto_alg *base = &alg->base;
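With the crypto_stats_* plumbing gone, crypto_aead_encrypt() and crypto_aead_decrypt() reduce to the bare contract checks shown above. A caller-side sketch of that contract, assuming "gcm(aes)" is available:

	#include <crypto/aead.h>

	static int demo_aead_contract(struct aead_request *req, const u8 *key)
	{
		struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		aead_request_set_tfm(req, tfm);

		err = crypto_aead_encrypt(req);	/* no key yet: -ENOKEY */

		/* After setkey, encrypt proceeds; decrypt additionally
		 * rejects cryptlen < authsize with -EINVAL. */
		err = crypto_aead_setkey(tfm, key, 32) ?:
		      crypto_aead_setauthsize(tfm, 16) ?:
		      crypto_aead_encrypt(req);

		crypto_free_aead(tfm);
		return err;
	}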
diff --git a/crypto/aegis-neon.h b/crypto/aegis-neon.h
new file mode 100644
index 000000000000..61e5614b45de
--- /dev/null
+++ b/crypto/aegis-neon.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _AEGIS_NEON_H
+#define _AEGIS_NEON_H
+
+void crypto_aegis128_init_neon(void *state, const void *key, const void *iv);
+void crypto_aegis128_update_neon(void *state, const void *msg);
+void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
+ unsigned int size);
+void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
+ unsigned int size);
+int crypto_aegis128_final_neon(void *state, void *tag_xor,
+ unsigned int assoclen,
+ unsigned int cryptlen,
+ unsigned int authsize);
+
+#endif
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index c4f1bfa1d04f..ca80d861345d 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
- unsigned int size = scatterwalk_clamp(&walk, assoclen);
+ unsigned int size = scatterwalk_next(&walk, assoclen);
+ const u8 *src = walk.addr;
unsigned int left = size;
- void *mapped = scatterwalk_map(&walk);
- const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS_BLOCK_SIZE) {
if (pos > 0) {
@@ -308,9 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
pos += left;
assoclen -= size;
- scatterwalk_unmap(mapped);
- scatterwalk_advance(&walk, size);
- scatterwalk_done(&walk, 0, assoclen);
+ scatterwalk_done_src(&walk, size);
}
if (pos > 0) {
@@ -323,8 +320,9 @@ static __always_inline
int crypto_aegis128_process_crypt(struct aegis_state *state,
struct skcipher_walk *walk,
void (*crypt)(struct aegis_state *state,
- u8 *dst, const u8 *src,
- unsigned int size))
+ u8 *dst,
+ const u8 *src,
+ unsigned int size))
{
int err = 0;
@@ -515,7 +513,6 @@ static struct aead_alg crypto_aegis128_alg_generic = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
@@ -534,7 +531,6 @@ static struct aead_alg crypto_aegis128_alg_simd = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",
@@ -570,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void)
crypto_unregister_aead(&crypto_aegis128_alg_generic);
}
-subsys_initcall(crypto_aegis128_module_init);
+module_init(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c
index 7de485907d81..b6a52a386b22 100644
--- a/crypto/aegis128-neon-inner.c
+++ b/crypto/aegis128-neon-inner.c
@@ -16,6 +16,7 @@
#define AEGIS_BLOCK_SIZE 16
#include <stddef.h>
+#include "aegis-neon.h"
extern int aegis128_have_aes_insn;
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
index a7856915ec85..b41807e63bd3 100644
--- a/crypto/aegis128-neon.c
+++ b/crypto/aegis128-neon.c
@@ -4,20 +4,10 @@
*/
#include <asm/cpufeature.h>
-#include <asm/neon.h>
+#include <asm/simd.h>
#include "aegis.h"
-
-void crypto_aegis128_init_neon(void *state, const void *key, const void *iv);
-void crypto_aegis128_update_neon(void *state, const void *msg);
-void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
- unsigned int size);
-void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
- unsigned int size);
-int crypto_aegis128_final_neon(void *state, void *tag_xor,
- unsigned int assoclen,
- unsigned int cryptlen,
- unsigned int authsize);
+#include "aegis-neon.h"
int aegis128_have_aes_insn __ro_after_init;
@@ -34,32 +24,28 @@ void crypto_aegis128_init_simd(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
- kernel_neon_begin();
- crypto_aegis128_init_neon(state, key, iv);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_init_neon(state, key, iv);
}
void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg)
{
- kernel_neon_begin();
- crypto_aegis128_update_neon(state, msg);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_update_neon(state, msg);
}
void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
- kernel_neon_begin();
- crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
}
void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
- kernel_neon_begin();
- crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
- kernel_neon_end();
+ scoped_ksimd()
+ crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
}
int crypto_aegis128_final_simd(struct aegis_state *state,
@@ -68,12 +54,7 @@ int crypto_aegis128_final_simd(struct aegis_state *state,
unsigned int cryptlen,
unsigned int authsize)
{
- int ret;
-
- kernel_neon_begin();
- ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen,
- authsize);
- kernel_neon_end();
-
- return ret;
+ scoped_ksimd()
+ return crypto_aegis128_final_neon(state, tag_xor, assoclen,
+ cryptlen, authsize);
}
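scoped_ksimd() above acts as a scope guard around kernel-mode NEON: the SIMD section is closed on every exit path of the attached statement, which is what lets crypto_aegis128_final_simd() drop its temporary and return straight from inside the scope. A before/after sketch of the final step:

	/* Before: explicit bracketing; the result must be staged so that
	 * kernel_neon_end() always runs. */
	kernel_neon_begin();
	ret = crypto_aegis128_final_neon(state, tag_xor, assoclen,
					 cryptlen, authsize);
	kernel_neon_end();
	return ret;

	/* After: the guard ends the SIMD section even on early return. */
	scoped_ksimd()
		return crypto_aegis128_final_neon(state, tag_xor, assoclen,
						  cryptlen, authsize);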
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 27ab27931813..85d2e78c8ef2 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -48,13 +48,13 @@
*/
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static inline u8 byte(const u32 x, const unsigned n)
{
@@ -1311,7 +1311,7 @@ static void __exit aes_fini(void)
crypto_unregister_alg(&aes_alg);
}
-subsys_initcall(aes_init);
+module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 205c2c257d49..a3b342f92fab 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -6,7 +6,7 @@
*/
#include <crypto/aes.h>
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <linux/module.h>
static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index e893c0f6c879..e468714f539d 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -12,6 +12,8 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/key-type.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net.h>
@@ -19,6 +21,10 @@
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
+#include <linux/string.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
struct alg_type_list {
const struct af_alg_type *type;
@@ -139,7 +145,7 @@ void af_alg_release_parent(struct sock *sk)
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
-static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int alg_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
{
const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
@@ -222,6 +228,132 @@ out:
return err;
}
+#ifdef CONFIG_KEYS
+
+static const u8 *key_data_ptr_user(const struct key *key,
+ unsigned int *datalen)
+{
+ const struct user_key_payload *ukp;
+
+ ukp = user_key_payload_locked(key);
+ if (IS_ERR_OR_NULL(ukp))
+ return ERR_PTR(-EKEYREVOKED);
+
+ *datalen = key->datalen;
+
+ return ukp->data;
+}
+
+static const u8 *key_data_ptr_encrypted(const struct key *key,
+ unsigned int *datalen)
+{
+ const struct encrypted_key_payload *ekp;
+
+ ekp = dereference_key_locked(key);
+ if (IS_ERR_OR_NULL(ekp))
+ return ERR_PTR(-EKEYREVOKED);
+
+ *datalen = ekp->decrypted_datalen;
+
+ return ekp->decrypted_data;
+}
+
+static const u8 *key_data_ptr_trusted(const struct key *key,
+ unsigned int *datalen)
+{
+ const struct trusted_key_payload *tkp;
+
+ tkp = dereference_key_locked(key);
+ if (IS_ERR_OR_NULL(tkp))
+ return ERR_PTR(-EKEYREVOKED);
+
+ *datalen = tkp->key_len;
+
+ return tkp->key;
+}
+
+static struct key *lookup_key(key_serial_t serial)
+{
+ key_ref_t key_ref;
+
+ key_ref = lookup_user_key(serial, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref))
+ return ERR_CAST(key_ref);
+
+ return key_ref_to_ptr(key_ref);
+}
+
+static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
+ unsigned int optlen)
+{
+ const struct af_alg_type *type = ask->type;
+ u8 *key_data = NULL;
+ unsigned int key_datalen;
+ key_serial_t serial;
+ struct key *key;
+ const u8 *ret;
+ int err;
+
+ if (optlen != sizeof(serial))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&serial, optval, optlen))
+ return -EFAULT;
+
+ key = lookup_key(serial);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ down_read(&key->sem);
+
+ ret = ERR_PTR(-ENOPROTOOPT);
+ if (!strcmp(key->type->name, "user") ||
+ !strcmp(key->type->name, "logon")) {
+ ret = key_data_ptr_user(key, &key_datalen);
+ } else if (IS_REACHABLE(CONFIG_ENCRYPTED_KEYS) &&
+ !strcmp(key->type->name, "encrypted")) {
+ ret = key_data_ptr_encrypted(key, &key_datalen);
+ } else if (IS_REACHABLE(CONFIG_TRUSTED_KEYS) &&
+ !strcmp(key->type->name, "trusted")) {
+ ret = key_data_ptr_trusted(key, &key_datalen);
+ }
+
+ if (IS_ERR(ret)) {
+ up_read(&key->sem);
+ key_put(key);
+ return PTR_ERR(ret);
+ }
+
+ key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
+ if (!key_data) {
+ up_read(&key->sem);
+ key_put(key);
+ return -ENOMEM;
+ }
+
+ memcpy(key_data, ret, key_datalen);
+
+ up_read(&key->sem);
+ key_put(key);
+
+ err = type->setkey(ask->private, key_data, key_datalen);
+
+ sock_kzfree_s(&ask->sk, key_data, key_datalen);
+
+ return err;
+}
+
+#else
+
+static inline int alg_setkey_by_key_serial(struct alg_sock *ask,
+ sockptr_t optval,
+ unsigned int optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+#endif
+
static int alg_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -242,12 +374,16 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case ALG_SET_KEY:
+ case ALG_SET_KEY_BY_KEY_SERIAL:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setkey)
goto unlock;
- err = alg_setkey(sk, optval, optlen);
+ if (optname == ALG_SET_KEY_BY_KEY_SERIAL)
+ err = alg_setkey_by_key_serial(ask, optval, optlen);
+ else
+ err = alg_setkey(sk, optval, optlen);
break;
case ALG_SET_AEAD_AUTHSIZE:
if (sock->state == SS_CONNECTED)
@@ -271,7 +407,8 @@ unlock:
return err;
}
-int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
+int af_alg_accept(struct sock *sk, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
@@ -286,7 +423,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
if (!type)
goto unlock;
- sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
+ sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, arg->kern);
err = -ENOMEM;
if (!sk2)
goto unlock;
@@ -332,10 +469,10 @@ unlock:
}
EXPORT_SYMBOL_GPL(af_alg_accept);
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int alg_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
- return af_alg_accept(sock->sk, newsock, kern);
+ return af_alg_accept(sock->sk, newsock, arg);
}
static const struct proto_ops alg_proto_ops = {
@@ -349,7 +486,6 @@ static const struct proto_ops alg_proto_ops = {
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
@@ -398,50 +534,25 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
-{
- size_t off;
- ssize_t n;
- int npages, i;
-
- n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
- if (n < 0)
- return n;
-
- npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
- if (WARN_ON(npages == 0))
- return -EINVAL;
- /* Add one extra for linking */
- sg_init_table(sgl->sg, npages + 1);
-
- for (i = 0, len = n; i < npages; i++) {
- int plen = min_t(int, len, PAGE_SIZE - off);
-
- sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
-
- off = 0;
- len -= plen;
- }
- sg_mark_end(sgl->sg + npages - 1);
- sgl->npages = npages;
-
- return n;
-}
-EXPORT_SYMBOL_GPL(af_alg_make_sg);
-
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
struct af_alg_sgl *sgl_new)
{
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+ sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+ sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
}
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
int i;
- for (i = 0; i < sgl->npages; i++)
- put_page(sgl->pages[i]);
+ if (sgl->sgt.sgl) {
+ if (sgl->need_unpin)
+ for (i = 0; i < sgl->sgt.nents; i++)
+ unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+ if (sgl->sgt.sgl != sgl->sgl)
+ kvfree(sgl->sgt.sgl);
+ sgl->sgt.sgl = NULL;
+ }
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
@@ -737,7 +848,7 @@ void af_alg_wmem_wakeup(struct sock *sk)
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
EPOLLRDNORM |
EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
@@ -804,7 +915,7 @@ static void af_alg_data_wakeup(struct sock *sk)
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLRDNORM |
EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
@@ -859,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
lock_sock(sk);
+ if (ctx->write) {
+ release_sock(sk);
+ return -EBUSY;
+ }
+ ctx->write = true;
+
if (ctx->init && !ctx->more) {
if (ctx->used) {
err = -EINVAL;
@@ -882,10 +999,10 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
while (size) {
struct scatterlist *sg;
size_t len = size;
- size_t plen;
+ ssize_t plen;
/* use the existing memory in an allocated page */
- if (ctx->merge) {
+ if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) {
sgl = list_entry(ctx->tsgl_list.prev,
struct af_alg_tsgl, list);
sg = sgl->sg + sgl->cur - 1;
@@ -908,6 +1025,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
continue;
}
+ ctx->merge = 0;
+
if (!af_alg_writable(sk)) {
err = af_alg_wait_for_wmem(sk, msg->msg_flags);
if (err)
@@ -927,116 +1046,77 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (sgl->cur)
sg_unmark_end(sg + sgl->cur - 1);
- do {
- struct page *pg;
- unsigned int i = sgl->cur;
-
- plen = min_t(size_t, len, PAGE_SIZE);
-
- pg = alloc_page(GFP_KERNEL);
- if (!pg) {
- err = -ENOMEM;
+ if (msg->msg_flags & MSG_SPLICE_PAGES) {
+ struct sg_table sgtable = {
+ .sgl = sg,
+ .nents = sgl->cur,
+ .orig_nents = sgl->cur,
+ };
+
+ plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable,
+ MAX_SGL_ENTS - sgl->cur, 0);
+ if (plen < 0) {
+ err = plen;
goto unlock;
}
- sg_assign_page(sg + i, pg);
-
- err = memcpy_from_msg(page_address(sg_page(sg + i)),
- msg, plen);
- if (err) {
- __free_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- goto unlock;
- }
-
- sg[i].length = plen;
+ for (; sgl->cur < sgtable.nents; sgl->cur++)
+ get_page(sg_page(&sg[sgl->cur]));
len -= plen;
ctx->used += plen;
copied += plen;
size -= plen;
- sgl->cur++;
- } while (len && sgl->cur < MAX_SGL_ENTS);
-
- if (!size)
- sg_mark_end(sg + sgl->cur - 1);
-
- ctx->merge = plen & (PAGE_SIZE - 1);
- }
-
- err = 0;
+ } else {
+ do {
+ struct page *pg;
+ unsigned int i = sgl->cur;
- ctx->more = msg->msg_flags & MSG_MORE;
+ plen = min_t(size_t, len, PAGE_SIZE);
-unlock:
- af_alg_data_wakeup(sk);
- release_sock(sk);
-
- return copied ?: err;
-}
-EXPORT_SYMBOL_GPL(af_alg_sendmsg);
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg) {
+ err = -ENOMEM;
+ goto unlock;
+ }
-/**
- * af_alg_sendpage - sendpage system call handler
- * @sock: socket of connection to user space to write to
- * @page: data to send
- * @offset: offset into page to begin sending
- * @size: length of data
- * @flags: message send/receive flags
- *
- * This is a generic implementation of sendpage to fill ctx->tsgl_list.
- */
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct af_alg_ctx *ctx = ask->private;
- struct af_alg_tsgl *sgl;
- int err = -EINVAL;
+ sg_assign_page(sg + i, pg);
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
+ err = memcpy_from_msg(
+ page_address(sg_page(sg + i)),
+ msg, plen);
+ if (err) {
+ __free_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ goto unlock;
+ }
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
+ sg[i].length = plen;
+ len -= plen;
+ ctx->used += plen;
+ copied += plen;
+ size -= plen;
+ sgl->cur++;
+ } while (len && sgl->cur < MAX_SGL_ENTS);
- if (!size)
- goto done;
+ ctx->merge = plen & (PAGE_SIZE - 1);
+ }
- if (!af_alg_writable(sk)) {
- err = af_alg_wait_for_wmem(sk, flags);
- if (err)
- goto unlock;
+ if (!size)
+ sg_mark_end(sg + sgl->cur - 1);
}
- err = af_alg_alloc_tsgl(sk);
- if (err)
- goto unlock;
-
- ctx->merge = 0;
- sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
-
- if (sgl->cur)
- sg_unmark_end(sgl->sg + sgl->cur - 1);
-
- sg_mark_end(sgl->sg + sgl->cur);
-
- get_page(page);
- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
- sgl->cur++;
- ctx->used += size;
+ err = 0;
-done:
- ctx->more = flags & MSG_MORE;
+ ctx->more = msg->msg_flags & MSG_MORE;
unlock:
af_alg_data_wakeup(sk);
+ ctx->write = false;
release_sock(sk);
- return err ?: size;
+ return copied ?: err;
}
-EXPORT_SYMBOL_GPL(af_alg_sendpage);
+EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
* af_alg_free_resources - release resources required for crypto request
@@ -1045,15 +1125,19 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
void af_alg_free_resources(struct af_alg_async_req *areq)
{
struct sock *sk = areq->sk;
+ struct af_alg_ctx *ctx;
af_alg_free_areq_sgls(areq);
sock_kfree_s(sk, areq, areq->areqlen);
+
+ ctx = alg_sk(sk)->private;
+ ctx->inflight = false;
}
EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
* af_alg_async_cb - AIO callback handler
- * @_req: async request info
+ * @data: async request completion data
* @err: if non-zero, error result to be returned via ki_complete();
* otherwise return the AIO output length via ki_complete().
*
@@ -1063,9 +1147,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
* The number of bytes to be generated with the AIO operation must be set
* in areq->outlen before the AIO callback handler is invoked.
*/
-void af_alg_async_cb(struct crypto_async_request *_req, int err)
+void af_alg_async_cb(void *data, int err)
{
- struct af_alg_async_req *areq = _req->data;
+ struct af_alg_async_req *areq = data;
struct sock *sk = areq->sk;
struct kiocb *iocb = areq->iocb;
unsigned int resultlen;
@@ -1117,17 +1201,25 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen)
{
- struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ struct af_alg_ctx *ctx = alg_sk(sk)->private;
+ struct af_alg_async_req *areq;
+
+ /* Only one AIO request can be in flight. */
+ if (ctx->inflight)
+ return ERR_PTR(-EBUSY);
+ areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
+ memset(areq, 0, areqlen);
+
+ ctx->inflight = true;
+
areq->areqlen = areqlen;
areq->sk = sk;
- areq->last_rsgl = NULL;
+ areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
INIT_LIST_HEAD(&areq->rsgl_list);
- areq->tsgl = NULL;
- areq->tsgl_entries = 0;
return areq;
}
@@ -1155,8 +1247,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
while (maxsize > len && msg_data_left(msg)) {
struct af_alg_rsgl *rsgl;
+ ssize_t err;
size_t seglen;
- int err;
/* limit the amount of readable buffers */
if (!af_alg_readable(sk))
@@ -1173,16 +1265,23 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
return -ENOMEM;
}
- rsgl->sgl.npages = 0;
+ rsgl->sgl.need_unpin =
+ iov_iter_extract_will_pin(&msg->msg_iter);
+ rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
+ rsgl->sgl.sgt.nents = 0;
+ rsgl->sgl.sgt.orig_nents = 0;
list_add_tail(&rsgl->list, &areq->rsgl_list);
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES);
+ err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt,
+ ALG_MAX_PAGES, 0);
if (err < 0) {
rsgl->sg_num_bytes = 0;
return err;
}
+ sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);
+
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
@@ -1225,5 +1324,6 @@ static void __exit af_alg_exit(void)
module_init(af_alg_init);
module_exit(af_alg_exit);
+MODULE_DESCRIPTION("Crypto userspace interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_ALG);
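ALG_SET_KEY_BY_KEY_SERIAL takes a key_serial_t instead of raw key bytes, and the kernel resolves it against the caller's keyrings as shown above. A userspace sketch (error handling trimmed; assumes the key was loaded with add_key(2) beforehand):

	#include <linux/if_alg.h>
	#include <stdint.h>
	#include <sys/socket.h>

	static int alg_setkey_from_keyring(int32_t serial)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "skcipher",
			.salg_name   = "cbc(aes)",
		};
		int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

		if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)))
			return -1;
		/* Accepted key types: "user" and "logon", plus "encrypted"
		 * and "trusted" when those subsystems are reachable. */
		return setsockopt(fd, SOL_ALG, ALG_SET_KEY_BY_KEY_SERIAL,
				  &serial, sizeof(serial));
	}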
diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2ca631a111f..66492ae75fcf 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -2,59 +2,82 @@
/*
* Asynchronous Cryptographic Hash operations.
*
- * This is the asynchronous version of hash.c with notification of
- * completion via a callback.
+ * This is the implementation of the ahash (asynchronous hash) API. It differs
+ * from shash (synchronous hash) in that ahash supports asynchronous operations,
+ * and it hashes data from scatterlists instead of virtually addressed buffers.
+ *
+ * The ahash API provides access to both ahash and shash algorithms. The shash
+ * API only provides access to shash algorithms.
*
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
-#include "internal.h"
+#include "hash.h"
-static const struct crypto_type crypto_ahash_type;
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-struct ahash_request_priv {
- crypto_completion_t complete;
- void *data;
- u8 *result;
- u32 flags;
- void *ubuf[] CRYPTO_MINALIGN_ATTR;
-};
+static int ahash_def_finup(struct ahash_request *req);
+
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
-static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
{
- return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
- halg);
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+ int (*finish)(struct ahash_request *, int))
+{
+ struct ahash_request *areq = data;
+ crypto_completion_t compl;
+
+ compl = areq->saved_complete;
+ data = areq->saved_data;
+ if (err == -EINPROGRESS)
+ goto out;
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = finish(areq, err);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ compl(data, err);
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
- unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
- walk->data = kmap_atomic(walk->pg);
+ walk->data = kmap_local_page(walk->pg);
walk->data += offset;
-
- if (offset & alignmask) {
- unsigned int unaligned = alignmask + 1 - (offset & alignmask);
-
- if (nbytes > unaligned)
- nbytes = unaligned;
- }
-
walk->entrylen -= nbytes;
return nbytes;
}
@@ -76,26 +99,37 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
return hash_walk_next(walk);
}
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk)
{
- unsigned int alignmask = walk->alignmask;
+ walk->total = req->nbytes;
+ walk->entrylen = 0;
- walk->data -= walk->offset;
+ if (!walk->total)
+ return 0;
- if (walk->entrylen && (walk->offset & alignmask) && !err) {
- unsigned int nbytes;
+ walk->flags = req->base.flags;
- walk->offset = ALIGN(walk->offset, alignmask + 1);
- nbytes = min(walk->entrylen,
- (unsigned int)(PAGE_SIZE - walk->offset));
- if (nbytes) {
- walk->entrylen -= nbytes;
- walk->data += walk->offset;
- return nbytes;
- }
+ if (ahash_request_isvirt(req)) {
+ walk->data = req->svirt;
+ walk->total = 0;
+ return req->nbytes;
}
- kunmap_atomic(walk->data);
+ walk->sg = req->src;
+
+ return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+ if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
+ return err;
+
+ walk->data -= walk->offset;
+
+ kunmap_local(walk->data);
crypto_yield(walk->flags);
if (err)
@@ -116,374 +150,630 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
-int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk)
+/*
+ * For an ahash tfm that is using an shash algorithm (instead of an ahash
+ * algorithm), this returns the underlying shash tfm.
+ */
+static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
- walk->total = req->nbytes;
-
- if (!walk->total) {
- walk->entrylen = 0;
- return 0;
- }
+ return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
+}
- walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
- walk->sg = req->src;
- walk->flags = req->base.flags;
+static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
+ struct crypto_ahash *tfm)
+{
+ struct shash_desc *desc = ahash_request_ctx(req);
- return hash_walk_new_entry(walk);
+ desc->tfm = ahash_to_shash(tfm);
+ return desc;
}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
+ struct crypto_hash_walk walk;
+ int nbytes;
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
+ for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
+ nbytes = crypto_hash_walk_done(&walk, nbytes))
+ nbytes = crypto_shash_update(desc, walk.data, nbytes);
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = tfm->setkey(tfm, alignbuffer, keylen);
- kfree_sensitive(buffer);
- return ret;
+ return nbytes;
}
+EXPORT_SYMBOL_GPL(shash_ahash_update);
-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
- return -ENOSYS;
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+ if (!nbytes)
+ return crypto_shash_final(desc, req->result);
+
+ do {
+ nbytes = crypto_hash_walk_last(&walk) ?
+ crypto_shash_finup(desc, walk.data, nbytes,
+ req->result) :
+ crypto_shash_update(desc, walk.data, nbytes);
+ nbytes = crypto_hash_walk_done(&walk, nbytes);
+ } while (nbytes > 0);
+
+ return nbytes;
}
+EXPORT_SYMBOL_GPL(shash_ahash_finup);
-static void ahash_set_needkey(struct crypto_ahash *tfm)
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
- const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+ unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
+ struct page *page;
+ const u8 *data;
+ int err;
- if (tfm->setkey != ahash_nosetkey &&
- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ data = req->svirt;
+ if (!nbytes || ahash_request_isvirt(req))
+ return crypto_shash_digest(desc, data, nbytes, req->result);
+
+ sg = req->src;
+ if (nbytes > sg->length)
+ return crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
+
+ page = sg_page(sg);
+ offset = sg->offset;
+ data = lowmem_page_address(page) + offset;
+ if (!IS_ENABLED(CONFIG_HIGHMEM))
+ return crypto_shash_digest(desc, data, nbytes, req->result);
+
+ page += offset >> PAGE_SHIFT;
+ offset = offset_in_page(offset);
+
+ if (nbytes > (unsigned int)PAGE_SIZE - offset)
+ return crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
+
+ data = kmap_local_page(page);
+ err = crypto_shash_digest(desc, data + offset, nbytes,
+ req->result);
+ kunmap_local(data);
+ return err;
}
+EXPORT_SYMBOL_GPL(shash_ahash_digest);
-int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int err;
+ struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
- if ((unsigned long)key & alignmask)
- err = ahash_setkey_unaligned(tfm, key, keylen);
- else
- err = tfm->setkey(tfm, key, keylen);
+ crypto_free_shash(*ctx);
+}
- if (unlikely(err)) {
- ahash_set_needkey(tfm);
- return err;
+static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
+ struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *shash;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ shash = crypto_create_tfm(calg, &crypto_shash_type);
+ if (IS_ERR(shash)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(shash);
}
- crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ crt->using_shash = true;
+ *ctx = shash;
+ tfm->exit = crypto_exit_ahash_using_shash;
+
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
+
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static inline unsigned int ahash_align_buffer_size(unsigned len,
- unsigned long mask)
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
{
- return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+ return -ENOSYS;
}
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
-
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
- return -ENOMEM;
-
- /*
- * WARNING: Voodoo programming below!
- *
- * The code below is obscure and hard to understand, thus explanation
- * is necessary. See include/crypto/hash.h and include/linux/crypto.h
- * to understand the layout of structures used here!
- *
- * The code here will replace portions of the ORIGINAL request with
- * pointers to new code and buffers so the hashing operation can store
- * the result in aligned buffer. We will call the modified request
- * an ADJUSTED request.
- *
- * The newly mangled request will look as such:
- *
- * req {
- * .result = ADJUSTED[new aligned buffer]
- * .base.complete = ADJUSTED[pointer to completion function]
- * .base.data = ADJUSTED[*req (pointer to self)]
- * .priv = ADJUSTED[new priv] {
- * .result = ORIGINAL(result)
- * .complete = ORIGINAL(base.complete)
- * .data = ORIGINAL(base.data)
- * }
- */
-
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
- priv->flags = req->base.flags;
-
- /*
- * WARNING: We do not backup req->priv here! The req->priv
- * is for internal use of the Crypto API and the
- * user must _NOT_ _EVER_ depend on it's content!
- */
-
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = cplt;
- req->base.data = req;
- req->priv = priv;
+ if (alg->setkey != ahash_nosetkey &&
+ !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (likely(tfm->using_shash)) {
+ struct crypto_shash *shash = ahash_to_shash(tfm);
+ int err;
+
+ err = crypto_shash_setkey(shash, key, keylen);
+ if (unlikely(err)) {
+ crypto_ahash_set_flags(tfm,
+ crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
+ return err;
+ }
+ } else {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ int err;
+
+ err = alg->setkey(tfm, key, keylen);
+ if (!err && crypto_ahash_need_fallback(tfm))
+ err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+ key, keylen);
+ if (unlikely(err)) {
+ ahash_set_needkey(tfm, alg);
+ return err;
+ }
+ }
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static void ahash_restore_req(struct ahash_request *req, int err)
+static int ahash_do_req_chain(struct ahash_request *req,
+ int (*const *op)(struct ahash_request *req))
{
- struct ahash_request_priv *priv = req->priv;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int err;
+
+ if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+ return (*op)(req);
+
+ if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+ return -ENOSYS;
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ if (!crypto_ahash_need_fallback(tfm))
+ return -ENOSYS;
- /* Restore the original crypto request. */
- req->result = priv->result;
+ if (crypto_hash_no_export_core(tfm))
+ return -ENOSYS;
- ahash_request_set_callback(req, priv->flags,
- priv->complete, priv->data);
- req->priv = NULL;
+ {
+ u8 state[HASH_MAX_STATESIZE];
- /* Free the req->priv.priv from the ADJUSTED request. */
- kfree_sensitive(priv);
+ if (op == &crypto_ahash_alg(tfm)->digest) {
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = crypto_ahash_digest(req);
+ goto out_no_state;
+ }
+
+ err = crypto_ahash_export(req, state);
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = err ?: crypto_ahash_import(req, state);
+
+ if (op == &crypto_ahash_alg(tfm)->finup) {
+ err = err ?: crypto_ahash_finup(req);
+ goto out_no_state;
+ }
+
+ err = err ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, state);
+
+ ahash_request_set_tfm(req, tfm);
+ return err ?: crypto_ahash_import(req, state);
+
+out_no_state:
+ ahash_request_set_tfm(req, tfm);
+ return err;
+ }
}
-static void ahash_notify_einprogress(struct ahash_request *req)
+int crypto_ahash_init(struct ahash_request *req)
{
- struct ahash_request_priv *priv = req->priv;
- struct crypto_async_request oreq;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- oreq.data = priv->data;
+ if (likely(tfm->using_shash))
+ return crypto_shash_init(prepare_shash_desc(req, tfm));
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_ahash_block_only(tfm)) {
+ u8 *buf = ahash_request_ctx(req);
+
+ buf += crypto_ahash_reqsize(tfm) - 1;
+ *buf = 0;
+ }
+ return crypto_ahash_alg(tfm)->init(req);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_init);
- priv->complete(&oreq, -EINPROGRESS);
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+{
+ req->saved_complete = req->base.complete;
+ req->saved_data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = req;
}
-static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+static void ahash_restore_req(struct ahash_request *req)
{
- struct ahash_request *areq = req->data;
+ req->base.complete = req->saved_complete;
+ req->base.data = req->saved_data;
+}
- if (err == -EINPROGRESS) {
- ahash_notify_einprogress(areq);
- return;
+static int ahash_update_finish(struct ahash_request *req, int err)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+ u8 *buf;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
}
- /*
- * Restore the original request, see ahash_op_unaligned() for what
- * goes where.
- *
- * The "struct ahash_request *req" here is in fact the "req.base"
- * from the ADJUSTED request from ahash_op_unaligned(), thus as it
- * is a pointer to self, it is also the ADJUSTED "req" .
- */
+ req->nbytes += nonzero - blen;
- /* First copy req->result into req->priv.result */
- ahash_restore_req(areq, err);
+ blen = 0;
+ if (err >= 0) {
+ blen = err + nonzero;
+ err = 0;
+ }
+ if (ahash_request_isvirt(req))
+ memcpy(buf, req->svirt + req->nbytes - blen, blen);
+ else
+ memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+ *blenp = blen;
- /* Complete the ORIGINAL request. */
- areq->base.complete(&areq->base, err);
+ ahash_restore_req(req);
+
+ return err;
}
-static int ahash_op_unaligned(struct ahash_request *req,
- int (*op)(struct ahash_request *))
+static void ahash_update_done(void *data, int err)
{
- int err;
+ ahash_op_done(data, err, ahash_update_finish);
+}
- err = ahash_save_req(req, ahash_op_unaligned_done);
- if (err)
- return err;
+int crypto_ahash_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
+
+ if (likely(tfm->using_shash))
+ return shash_ahash_update(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen + req->nbytes < bs + nonzero) {
+ if (ahash_request_isvirt(req))
+ memcpy(buf + blen, req->svirt, req->nbytes);
+ else
+ memcpy_from_sglist(buf + blen, req->src, 0,
+ req->nbytes);
+
+ *blenp += req->nbytes;
+ return 0;
+ }
- err = op(req);
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
+ req->nbytes -= nonzero;
+
+ ahash_save_req(req, ahash_update_done);
+
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
- ahash_restore_req(req, err);
-
- return err;
+ return ahash_update_finish(req, err);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_update);
-static int crypto_ahash_op(struct ahash_request *req,
- int (*op)(struct ahash_request *))
+static int ahash_finup_finish(struct ahash_request *req, int err)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+
+ if (blen) {
+ if (sg_is_last(req->src))
+ req->src = NULL;
+ else {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
+ }
+ req->nbytes -= blen;
+ }
- if ((unsigned long)req->result & alignmask)
- return ahash_op_unaligned(req, op);
+ ahash_restore_req(req);
- return op(req);
+ return err;
}
-int crypto_ahash_final(struct ahash_request *req)
+static void ahash_finup_done(void *data, int err)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ ahash_op_done(data, err, ahash_finup_finish);
}
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
+
+ if (likely(tfm->using_shash))
+ return shash_ahash_finup(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_alg(tfm)->finup)
+ return ahash_def_finup(req);
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (!req->src)
+ sg_mark_end(req->sg_head);
+ else if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ ahash_save_req(req, ahash_finup_done);
+
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
+
+ return ahash_finup_finish(req, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
- crypto_stats_get(alg);
+ if (likely(tfm->using_shash))
+ return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_ahash_op(req, tfm->digest);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ return -ENOKEY;
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+static void ahash_def_finup_done2(void *data, int err)
{
- struct ahash_request *areq = req->data;
+ struct ahash_request *areq = data;
if (err == -EINPROGRESS)
return;
- ahash_restore_req(areq, err);
-
- areq->base.complete(&areq->base, err);
+ ahash_restore_req(areq);
+ ahash_request_complete(areq, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
if (err)
goto out;
req->base.complete = ahash_def_finup_done2;
- err = crypto_ahash_reqtfm(req)->final(req);
+ err = crypto_ahash_alg(tfm)->final(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
- ahash_restore_req(req, err);
+ ahash_restore_req(req);
return err;
}
-static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_request *areq = req->data;
-
- if (err == -EINPROGRESS) {
- ahash_notify_einprogress(areq);
- return;
- }
-
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = ahash_def_finup_finish1(areq, err);
- if (areq->priv)
- return;
-
- areq->base.complete(&areq->base, err);
+ ahash_op_done(data, err, ahash_def_finup_finish1);
}
static int ahash_def_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
int err;
- err = ahash_save_req(req, ahash_def_finup_done1);
- if (err)
- return err;
+ ahash_save_req(req, ahash_def_finup_done1);
- err = tfm->update(req);
+ err = crypto_ahash_update(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
}
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_export_core(ahash_request_ctx(req), out);
+ return crypto_ahash_alg(tfm)->export_core(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
+int crypto_ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_export(ahash_request_ctx(req), out);
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ memcpy(out + ss - plen, buf + reqsize - plen, plen);
+ }
+ return crypto_ahash_alg(tfm)->export(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export);
+
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+ in);
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ buf[reqsize - 1] = 0;
+ }
+ return crypto_ahash_alg(tfm)->import_core(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
+int crypto_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_import(prepare_shash_desc(req, tfm), in);
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ memcpy(buf + reqsize - plen, in + ss - plen, plen);
+ if (buf[reqsize - 1] >= plen)
+ return -EOVERFLOW;
+ }
+ return crypto_ahash_alg(tfm)->import(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import);
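/*
 * Layout note (sketch) for CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms, as
 * handled by the export/import paths above: the request context ends with
 * a partial-block buffer plus a one-byte count, and the exported state
 * mirrors that (bs + 1)-byte tail.
 *
 *   ctx[reqsize - bs - 1 .. reqsize - 2]  buffered partial block
 *   ctx[reqsize - 1]                      buffered byte count (blen)
 *   out[ss - bs - 1 .. ss - 1]            same tail in the exported state
 *
 * crypto_ahash_import() rejects a count >= bs + 1 with -EOVERFLOW, so a
 * corrupt state cannot index past the buffer.
 */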
+
static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
- alg->exit_tfm(hash);
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+
+ if (crypto_ahash_need_fallback(hash))
+ crypto_free_ahash(crypto_ahash_fb(hash));
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
+ struct crypto_ahash *fb = NULL;
+ int err;
- hash->setkey = ahash_nosetkey;
+ crypto_ahash_set_statesize(hash, alg->halg.statesize);
+ crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
- if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
- return crypto_init_shash_ops_async(tfm);
+ if (tfm->__crt_alg->cra_type == &crypto_shash_type)
+ return crypto_init_ahash_using_shash(tfm);
- hash->init = alg->init;
- hash->update = alg->update;
- hash->final = alg->final;
- hash->finup = alg->finup ?: ahash_def_finup;
- hash->digest = alg->digest;
- hash->export = alg->export;
- hash->import = alg->import;
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
+ CRYPTO_ALG_REQ_VIRT,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_REQ_VIRT |
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
- if (alg->setkey) {
- hash->setkey = alg->setkey;
- ahash_set_needkey(hash);
+ tfm->fb = crypto_ahash_tfm(fb);
}
- if (alg->exit_tfm)
- tfm->exit = crypto_ahash_exit_tfm;
+ ahash_set_needkey(hash, alg);
+
+ tfm->exit = crypto_ahash_exit_tfm;
+
+ if (alg->init_tfm)
+ err = alg->init_tfm(hash);
+ else if (tfm->__crt_alg->cra_init)
+ err = tfm->__crt_alg->cra_init(tfm);
+ else
+ return 0;
- return alg->init_tfm ? alg->init_tfm(hash) : 0;
+ if (err)
+ goto out_free_sync_hash;
+
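+ /*
+ * Sync tfms must be usable with the fixed-size on-stack requests
+ * of HASH_REQUEST_ON_STACK(), so reject any that would need more
+ * than MAX_SYNC_HASH_REQSIZE.
+ */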
+ if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+ MAX_SYNC_HASH_REQSIZE)
+ goto out_exit_tfm;
+
+ BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+ if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+ crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
+
+ return 0;
+
+out_exit_tfm:
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+ err = -EINVAL;
+out_free_sync_hash:
+ crypto_free_ahash(fb);
+ return err;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
- if (alg->cra_type != &crypto_ahash_type)
+ if (alg->cra_type == &crypto_shash_type)
return sizeof(struct crypto_shash *);
return crypto_alg_extsize(alg);
@@ -496,8 +786,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst)
ahash->free(ahash);
}
-#ifdef CONFIG_NET
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_ahash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
@@ -510,20 +800,14 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
+ seq_printf(m, "async : %s\n",
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n",
__crypto_hash_alg_common(alg)->digestsize);
@@ -536,11 +820,14 @@ static const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_ahash_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
+ .algsize = offsetof(struct ahash_alg, halg.base),
};
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
@@ -565,19 +852,146 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+ struct crypto_alg *alg = &halg->base;
+
+ if (alg->cra_type == &crypto_shash_type)
+ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+ return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
+{
+ struct hash_alg_common *halg = crypto_hash_alg_common(hash);
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *fb = NULL;
+ struct crypto_ahash *nhash;
+ struct ahash_alg *alg;
+ int err;
+
+ if (!crypto_hash_alg_has_setkey(halg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
+
+ if (IS_ERR(nhash))
+ return nhash;
+
+ nhash->reqsize = hash->reqsize;
+ nhash->statesize = hash->statesize;
+
+ if (likely(hash->using_shash)) {
+ struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
+ struct crypto_shash *shash;
+
+ shash = crypto_clone_shash(ahash_to_shash(hash));
+ if (IS_ERR(shash)) {
+ err = PTR_ERR(shash);
+ goto out_free_nhash;
+ }
+ crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash;
+ nhash->using_shash = true;
+ *nctx = shash;
+ return nhash;
+ }
+
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_clone_ahash(crypto_ahash_fb(hash));
+ err = PTR_ERR(fb);
+ if (IS_ERR(fb))
+ goto out_free_nhash;
+
+ crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb);
+ }
+
+ err = -ENOSYS;
+ alg = crypto_ahash_alg(hash);
+ if (!alg->clone_tfm)
+ goto out_free_fb;
+
+ err = alg->clone_tfm(nhash, hash);
+ if (err)
+ goto out_free_fb;
+
+ crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm;
+
+ return nhash;
+
+out_free_fb:
+ crypto_free_ahash(fb);
+out_free_nhash:
+ crypto_free_ahash(nhash);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_clone_ahash);
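
A hedged usage sketch (not from this patch; the algorithm name and the example_* identifier are illustrative): cloning a keyed HMAC transform so the copy inherits the key without a second setkey() call.

#include <crypto/hash.h>

static struct crypto_ahash *example_clone_hmac(const u8 *key,
					       unsigned int keylen)
{
	struct crypto_ahash *tfm, *clone;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_ahash(tfm);
		return ERR_PTR(err);
	}

	clone = crypto_clone_ahash(tfm);	/* copies the keyed state */
	crypto_free_ahash(tfm);			/* clone stands alone */
	return clone;
}
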
+
+static int ahash_default_export_core(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_default_import_core(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
+ int err;
+
+ if (alg->halg.statesize == 0)
+ return -EINVAL;
- if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
- alg->halg.statesize > HASH_MAX_STATESIZE ||
- alg->halg.statesize == 0)
+ if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
return -EINVAL;
+ if (!(base->cra_flags & CRYPTO_ALG_ASYNC) &&
+ base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
+ return -EINVAL;
+
+ if (base->cra_flags & CRYPTO_ALG_NEED_FALLBACK &&
+ base->cra_flags & CRYPTO_ALG_NO_FALLBACK)
+ return -EINVAL;
+
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_ahash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+ if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT) &&
+ !(base->cra_flags & CRYPTO_ALG_NO_FALLBACK))
+ base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+
+ if (!alg->setkey)
+ alg->setkey = ahash_nosetkey;
+
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ if (!alg->finup)
+ return -EINVAL;
+
+ base->cra_reqsize += base->cra_blocksize + 1;
+ alg->halg.statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = ahash_default_export_core;
+ alg->import_core = ahash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
return 0;
}
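
For driver authors, a hypothetical registration sketch under the rules enforced above: a BLOCK_ONLY algorithm must supply ->finup(), and the core grows the request and state sizes by blocksize + 1 bytes for the buffered partial block. Every name, size, and stub below is illustrative only.

#include <crypto/internal/hash.h>

static int example_init(struct ahash_request *req) { return 0; }
static int example_update(struct ahash_request *req) { return 0; } /* full blocks only */
static int example_finup(struct ahash_request *req) { return 0; }  /* mandatory */
static int example_export(struct ahash_request *req, void *out) { return 0; }
static int example_import(struct ahash_request *req, const void *in) { return 0; }

static struct ahash_alg example_alg = {
	.init		= example_init,
	.update		= example_update,
	.finup		= example_finup,
	.export		= example_export,
	.import		= example_import,
	.halg.digestsize = 32,		/* illustrative */
	.halg.statesize	 = 32,		/* core state, before the +blocksize+1 adjustment */
	.halg.base	= {
		.cra_name	 = "example",
		.cra_driver_name = "example-generic",
		.cra_blocksize	 = 64,
		.cra_flags	 = CRYPTO_AHASH_ALG_BLOCK_ONLY,
		.cra_module	 = THIS_MODULE,
	},
};
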
@@ -645,16 +1059,42 @@ int ahash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
-bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+void ahash_request_free(struct ahash_request *req)
{
- struct crypto_alg *alg = &halg->base;
+ if (unlikely(!req))
+ return;
- if (alg->cra_type != &crypto_ahash_type)
- return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+ if (!ahash_req_on_stack(req)) {
+ kfree(req);
+ return;
+ }
- return __crypto_ahash_alg(alg)->setkey != NULL;
+ ahash_request_zero(req);
}
-EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+EXPORT_SYMBOL_GPL(ahash_request_free);
+
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
+ int err;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, out, len);
+ err = crypto_ahash_digest(req);
+
+ ahash_request_zero(req);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_digest);
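
A brief usage note: crypto_hash_digest() runs on the transform's virtual-address fallback with an on-stack request, so callers need neither a scatterlist nor a request allocation. A hedged sketch (example_* is illustrative; the digest buffer must match the tfm's digest size):

#include <crypto/hash.h>

static int example_oneshot(struct crypto_ahash *tfm, const u8 *data,
			   unsigned int len, u8 *digest)
{
	/* One call: no scatterlist or request management by the caller. */
	return crypto_hash_digest(tfm, data, len, digest);
}
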
+
+void ahash_free_singlespawn_instance(struct ahash_instance *inst)
+{
+ crypto_drop_spawn(ahash_instance_ctx(inst));
+ kfree(inst);
+}
+EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index ab975a420e1e..a36f50c83827 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -5,23 +5,36 @@
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <tadeusz.struk@intel.com>
*/
+#include <crypto/internal/akcipher.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
#include <net/netlink.h>
-#include <crypto/akcipher.h>
-#include <crypto/internal/akcipher.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+
+struct crypto_akcipher_sync_data {
+ struct crypto_akcipher *tfm;
+ const void *src;
+ void *dst;
+ unsigned int slen;
+ unsigned int dlen;
+
+ struct akcipher_request *req;
+ struct crypto_wait cwait;
+ struct scatterlist sg;
+ u8 *buf;
+};
+
+static int __maybe_unused crypto_akcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -32,12 +45,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
-#else
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -83,11 +90,14 @@ static const struct crypto_type crypto_akcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_akcipher_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
+ .algsize = offsetof(struct akcipher_alg, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
@@ -130,10 +140,6 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
- if (!alg->sign)
- alg->sign = akcipher_default_op;
- if (!alg->verify)
- alg->verify = akcipher_default_op;
if (!alg->encrypt)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
@@ -162,5 +168,89 @@ int akcipher_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
+static int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
+{
+ unsigned int reqsize = crypto_akcipher_reqsize(data->tfm);
+ struct akcipher_request *req;
+ struct scatterlist *sg;
+ unsigned int mlen;
+ unsigned int len;
+ u8 *buf;
+
+ mlen = max(data->slen, data->dlen);
+
+ len = sizeof(*req) + reqsize + mlen;
+ if (len < mlen)
+ return -EOVERFLOW;
+
+ req = kzalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ data->req = req;
+ akcipher_request_set_tfm(req, data->tfm);
+
+ buf = (u8 *)(req + 1) + reqsize;
+ data->buf = buf;
+ memcpy(buf, data->src, data->slen);
+
+ sg = &data->sg;
+ sg_init_one(sg, buf, mlen);
+ akcipher_request_set_crypt(req, sg, sg, data->slen, data->dlen);
+
+ crypto_init_wait(&data->cwait);
+ akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &data->cwait);
+
+ return 0;
+}
+
+static int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data,
+ int err)
+{
+ err = crypto_wait_req(err, &data->cwait);
+ memcpy(data->dst, data->buf, data->dlen);
+ data->dlen = data->req->dst_len;
+ kfree_sensitive(data->req);
+ return err;
+}
+
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct crypto_akcipher_sync_data data = {
+ .tfm = tfm,
+ .src = src,
+ .dst = dst,
+ .slen = slen,
+ .dlen = dlen,
+ };
+
+ return crypto_akcipher_sync_prep(&data) ?:
+ crypto_akcipher_sync_post(&data,
+ crypto_akcipher_encrypt(data.req));
+}
+EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt);
+
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct crypto_akcipher_sync_data data = {
+ .tfm = tfm,
+ .src = src,
+ .dst = dst,
+ .slen = slen,
+ .dlen = dlen,
+ };
+
+ return crypto_akcipher_sync_prep(&data) ?:
+ crypto_akcipher_sync_post(&data,
+ crypto_akcipher_decrypt(data.req)) ?:
+ data.dlen;
+}
+EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt);
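
A hedged usage sketch for the new synchronous helpers (example_* is illustrative): they hide the request allocation, bounce buffer, and completion wait done in crypto_akcipher_sync_prep()/sync_post() above. The tfm is assumed to already hold a public key loaded with crypto_akcipher_set_pub_key().

#include <crypto/akcipher.h>

static int example_rsa_encrypt(struct crypto_akcipher *tfm,
			       const void *msg, unsigned int mlen,
			       void *ct, unsigned int clen)
{
	/* Blocks until the operation completes; src/dst are plain buffers. */
	return crypto_akcipher_sync_encrypt(tfm, msg, mlen, ct, clen);
}
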
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5c69ff8e8fa5..e604d0d8b7b4 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fips.h>
@@ -17,16 +16,12 @@
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "internal.h"
static LIST_HEAD(crypto_template_list);
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
-EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
-#endif
-
static inline void crypto_check_module_sig(struct module *mod)
{
if (fips_enabled && mod && !module_sig_ok(mod))
@@ -74,13 +69,36 @@ static void crypto_free_instance(struct crypto_instance *inst)
inst->alg.cra_type->free(inst);
}
+static void crypto_destroy_instance_workfn(struct work_struct *w)
+{
+ struct crypto_template *tmpl = container_of(w, struct crypto_template,
+ free_work);
+ struct crypto_instance *inst;
+ struct hlist_node *n;
+ HLIST_HEAD(list);
+
+ down_write(&crypto_alg_sem);
+ hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) {
+ if (refcount_read(&inst->alg.cra_refcnt) != -1)
+ continue;
+ hlist_del(&inst->list);
+ hlist_add_head(&inst->list, &list);
+ }
+ up_write(&crypto_alg_sem);
+
+ hlist_for_each_entry_safe(inst, n, &list, list)
+ crypto_free_instance(inst);
+}
+
static void crypto_destroy_instance(struct crypto_alg *alg)
{
- struct crypto_instance *inst = (void *)alg;
+ struct crypto_instance *inst = container_of(alg,
+ struct crypto_instance,
+ alg);
struct crypto_template *tmpl = inst->tmpl;
- crypto_free_instance(inst);
- crypto_tmpl_put(tmpl);
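+ /*
+ * A refcount of -1 marks this entry on tmpl->dead as ready to be
+ * freed, so the workfn above can skip instances that are still
+ * referenced.
+ */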
+ refcount_set(&alg->cra_refcnt, -1);
+ schedule_work(&tmpl->free_work);
}
/*
@@ -126,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst,
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
- if (!tmpl || !crypto_tmpl_get(tmpl))
+ if (!tmpl)
return;
- list_move(&inst->alg.cra_list, list);
+ list_del_init(&inst->alg.cra_list);
hlist_del(&inst->list);
- inst->alg.cra_destroy = crypto_destroy_instance;
+ hlist_add_head(&inst->list, &tmpl->dead);
BUG_ON(!list_empty(&inst->alg.cra_users));
+
+ crypto_alg_put(&inst->alg);
}
/*
@@ -222,12 +242,41 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
}
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
+static void crypto_alg_finish_registration(struct crypto_alg *alg,
+ struct list_head *algs_to_put)
+{
+ struct crypto_alg *q;
+
+ list_for_each_entry(q, &crypto_alg_list, cra_list) {
+ if (q == alg)
+ continue;
+
+ if (crypto_is_moribund(q))
+ continue;
+
+ if (crypto_is_larval(q))
+ continue;
+
+ if (strcmp(alg->cra_name, q->cra_name))
+ continue;
+
+ if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
+ q->cra_priority > alg->cra_priority)
+ continue;
+
+ crypto_remove_spawns(q, algs_to_put, alg);
+ }
+
+ crypto_notify(CRYPTO_MSG_ALG_LOADED, alg);
+}
+
static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
{
struct crypto_larval *larval;
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER))
- return NULL;
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) ||
+ (alg->cra_flags & CRYPTO_ALG_INTERNAL))
+ return NULL; /* No self-test needed */
larval = crypto_larval_alloc(alg->cra_name,
alg->cra_flags | CRYPTO_ALG_TESTED, 0);
@@ -248,7 +297,8 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
return larval;
}
-static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
+static struct crypto_larval *
+__crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
{
struct crypto_alg *q;
struct crypto_larval *larval;
@@ -259,9 +309,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
INIT_LIST_HEAD(&alg->cra_users);
- /* No cheating! */
- alg->cra_flags &= ~CRYPTO_ALG_TESTED;
-
ret = -EEXIST;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
@@ -278,6 +325,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
+ !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}
@@ -288,12 +336,15 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
list_add(&alg->cra_list, &crypto_alg_list);
- if (larval)
+ if (larval) {
+ /* No cheating! */
+ alg->cra_flags &= ~CRYPTO_ALG_TESTED;
+
list_add(&larval->alg.cra_list, &crypto_alg_list);
- else
+ } else {
alg->cra_flags |= CRYPTO_ALG_TESTED;
-
- crypto_stats_init(alg);
+ crypto_alg_finish_registration(alg, algs_to_put);
+ }
out:
return larval;
@@ -309,7 +360,6 @@ void crypto_alg_tested(const char *name, int err)
struct crypto_alg *alg;
struct crypto_alg *q;
LIST_HEAD(list);
- bool best;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
@@ -323,13 +373,14 @@ void crypto_alg_tested(const char *name, int err)
}
pr_err("alg: Unexpected test result for %s: %d\n", name, err);
- goto unlock;
+ up_write(&crypto_alg_sem);
+ return;
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
- if (list_empty(&alg->cra_list))
+ if (crypto_is_dead(alg))
goto complete;
if (err == -ECANCELED)
@@ -341,69 +392,15 @@ found:
alg->cra_flags |= CRYPTO_ALG_TESTED;
- /* Only satisfy larval waiters if we are the best. */
- best = true;
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (crypto_is_moribund(q) || !crypto_is_larval(q))
- continue;
-
- if (strcmp(alg->cra_name, q->cra_name))
- continue;
-
- if (q->cra_priority > alg->cra_priority) {
- best = false;
- break;
- }
- }
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (q == alg)
- continue;
-
- if (crypto_is_moribund(q))
- continue;
-
- if (crypto_is_larval(q)) {
- struct crypto_larval *larval = (void *)q;
-
- /*
- * Check to see if either our generic name or
- * specific name can satisfy the name requested
- * by the larval entry q.
- */
- if (strcmp(alg->cra_name, q->cra_name) &&
- strcmp(alg->cra_driver_name, q->cra_name))
- continue;
-
- if (larval->adult)
- continue;
- if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
- continue;
-
- if (best && crypto_mod_get(alg))
- larval->adult = alg;
- else
- larval->adult = ERR_PTR(-EAGAIN);
-
- continue;
- }
-
- if (strcmp(alg->cra_name, q->cra_name))
- continue;
-
- if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
- q->cra_priority > alg->cra_priority)
- continue;
-
- crypto_remove_spawns(q, &list, alg);
- }
+ crypto_alg_finish_registration(alg, &list);
complete:
+ list_del_init(&test->alg.cra_list);
complete_all(&test->completion);
-unlock:
up_write(&crypto_alg_sem);
+ crypto_alg_put(&test->alg);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
@@ -420,10 +417,20 @@ void crypto_remove_final(struct list_head *list)
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
+static void crypto_free_alg(struct crypto_alg *alg)
+{
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ crypto_destroy_alg(alg);
+ kfree(p);
+}
+
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
- bool test_started;
+ bool test_started = false;
+ LIST_HEAD(algs_to_put);
int err;
alg->cra_flags &= ~CRYPTO_ALG_DEAD;
@@ -431,18 +438,37 @@ int crypto_register_alg(struct crypto_alg *alg)
if (err)
return err;
+ if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST &&
+ !WARN_ON_ONCE(alg->cra_destroy)) {
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ alg = (void *)(p + algsize);
+ alg->cra_destroy = crypto_free_alg;
+ }
+
down_write(&crypto_alg_sem);
- larval = __crypto_register_alg(alg);
- test_started = static_key_enabled(&crypto_boot_test_finished);
- if (!IS_ERR_OR_NULL(larval))
+ larval = __crypto_register_alg(alg, &algs_to_put);
+ if (!IS_ERR_OR_NULL(larval)) {
+ test_started = crypto_boot_test_finished();
larval->test_started = test_started;
+ }
up_write(&crypto_alg_sem);
- if (IS_ERR_OR_NULL(larval))
+ if (IS_ERR(larval)) {
+ crypto_alg_put(alg);
return PTR_ERR(larval);
+ }
if (test_started)
- crypto_wait_for_test(larval);
+ crypto_schedule_test(larval);
+ else
+ crypto_remove_final(&algs_to_put);
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
@@ -472,10 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
- if (alg->cra_destroy)
- alg->cra_destroy(alg);
+ WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1);
+ list_add(&alg->cra_list, &list);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
@@ -514,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl)
struct crypto_template *q;
int err = -EEXIST;
+ INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn);
+
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
@@ -575,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl)
crypto_free_instance(inst);
}
crypto_remove_final(&users);
+
+ flush_work(&tmpl->free_work);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
@@ -619,6 +648,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_larval *larval;
struct crypto_spawn *spawn;
u32 fips_internal = 0;
+ LIST_HEAD(algs_to_put);
int err;
err = crypto_check_alg(&inst->alg);
@@ -627,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
+ inst->alg.cra_destroy = crypto_destroy_instance;
down_write(&crypto_alg_sem);
@@ -650,7 +681,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL);
- larval = __crypto_register_alg(&inst->alg);
+ larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
else if (larval)
@@ -662,15 +693,15 @@ int crypto_register_instance(struct crypto_template *tmpl,
unlock:
up_write(&crypto_alg_sem);
- err = PTR_ERR(larval);
- if (IS_ERR_OR_NULL(larval))
- goto err;
+ if (IS_ERR(larval))
+ return PTR_ERR(larval);
- crypto_wait_for_test(larval);
- err = 0;
+ if (larval)
+ crypto_schedule_test(larval);
+ else
+ crypto_remove_final(&algs_to_put);
-err:
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
@@ -892,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg)
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_inst_setname);
+EXPORT_SYMBOL_GPL(__crypto_inst_setname);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
@@ -942,6 +973,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
struct crypto_async_request *request)
{
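+ /*
+ * Re-queueing at the head of a full queue pushes the boundary of
+ * accepted requests back by one, so the backlog marker must move
+ * back with it.
+ */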
+ if (unlikely(queue->qlen >= queue->max_qlen))
+ queue->backlog = queue->backlog->prev;
+
queue->qlen++;
list_add(&request->list, &queue->list);
}
@@ -960,7 +994,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
queue->backlog = queue->backlog->next;
request = queue->list.next;
- list_del(request);
+ list_del_init(request);
return list_entry(request, struct crypto_async_request, list);
}
@@ -1019,221 +1053,16 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg)
-{
- memset(&alg->stats, 0, sizeof(alg->stats));
-}
-EXPORT_SYMBOL_GPL(crypto_stats_init);
-
-void crypto_stats_get(struct crypto_alg *alg)
-{
- crypto_alg_get(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_get);
-
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
-
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
-
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
-
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
-
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.sign_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
-
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.verify_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
-
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.compress_cnt);
- atomic64_add(slen, &alg->stats.compress.compress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_compress);
-
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.decompress_cnt);
- atomic64_add(slen, &alg->stats.compress.decompress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_decompress);
-
-void crypto_stats_ahash_update(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.hash.err_cnt);
- else
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
-
-void crypto_stats_ahash_final(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.hash.err_cnt);
- } else {
- atomic64_inc(&alg->stats.hash.hash_cnt);
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
-
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.setsecret_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
-
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
-
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
-
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.rng.err_cnt);
- else
- atomic64_inc(&alg->stats.rng.seed_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
-
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
- int ret)
+static void __init crypto_start_tests(void)
{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.rng.err_cnt);
- } else {
- atomic64_inc(&alg->stats.rng.generate_cnt);
- atomic64_add(dlen, &alg->stats.rng.generate_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
+ if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI))
+ return;
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
+ return;
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
-#endif
+ set_crypto_boot_test_finished();
-static void __init crypto_start_tests(void)
-{
for (;;) {
struct crypto_larval *larval = NULL;
struct crypto_alg *q;
@@ -1264,10 +1093,8 @@ static void __init crypto_start_tests(void)
if (!larval)
break;
- crypto_wait_for_test(larval);
+ crypto_schedule_test(larval);
}
-
- static_branch_enable(&crypto_boot_test_finished);
}
static int __init crypto_algapi_init(void)
diff --git a/crypto/algboss.c b/crypto/algboss.c
index eb5fe84efb83..846f586889ee 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
- int err;
+ int err = -ENOENT;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
@@ -64,6 +64,8 @@ static int cryptomgr_probe(void *data)
crypto_tmpl_put(tmpl);
out:
+ param->larval->adult = ERR_PTR(err);
+ param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD;
complete_all(&param->larval->completion);
crypto_alg_put(&param->larval->alg);
kfree(param);
@@ -138,9 +140,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
goto err_free_param;
}
- if (!i)
- goto err_free_param;
-
param->tb[i + 1] = NULL;
param->type.attr.rta_len = sizeof(param->type);
@@ -175,18 +174,10 @@ static int cryptomgr_test(void *data)
{
struct crypto_test_param *param = data;
u32 type = param->type;
- int err = 0;
-
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
- goto skiptest;
-#endif
-
- if (type & CRYPTO_ALG_TESTED)
- goto skiptest;
+ int err;
err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
-skiptest:
crypto_alg_tested(param->driver, err);
kfree(param);
@@ -197,7 +188,9 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
{
struct task_struct *thread;
struct crypto_test_param *param;
- u32 type;
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
+ return NOTIFY_DONE;
if (!try_module_get(THIS_MODULE))
goto err;
@@ -208,13 +201,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
- type = alg->cra_flags;
-
- /* Do not test internal algorithms. */
- if (type & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_TESTED;
-
- param->type = type;
+ param->type = alg->cra_flags;
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
if (IS_ERR(thread))
@@ -260,13 +247,7 @@ static void __exit cryptomgr_exit(void)
BUG_ON(err);
}
-/*
- * This is arch_initcall() so that the crypto self-tests are run on algorithms
- * registered early by subsys_initcall(). subsys_initcall() is needed for
- * generic implementations so that they're available for comparison tests when
- * other implementations are registered later by module_init().
- */
-arch_initcall(cryptomgr_init);
+module_init(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 42493b4d8ce4..79b016a899a1 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -9,10 +9,10 @@
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage/sendmsg. Filling
- * up the TX SGL does not cause a crypto operation -- the data will only be
- * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
- * provide a buffer which is tracked with the RX SGL.
+ * filled by user space with the data submitted via sendmsg (maybe with
+ * MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation
+ * -- the data will only be tracked by the kernel. Upon receipt of one recvmsg
+ * call, the caller must provide a buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
@@ -27,7 +27,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
-#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
@@ -36,19 +35,13 @@
#include <linux/net.h>
#include <net/sock.h>
-struct aead_tfm {
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-};
-
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int as = crypto_aead_authsize(tfm);
/*
@@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
- struct scatterlist *src,
- struct scatterlist *dst, unsigned int len)
-{
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
-
- skcipher_request_set_sync_tfm(skreq, null_tfm);
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(skreq, src, dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
- struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
+ struct crypto_aead *tfm = pask->private;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
@@ -113,19 +89,19 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
}
/*
- * Data length provided by caller via sendmsg/sendpage that has not
- * yet been processed.
+ * Data length provided by caller via sendmsg that has not yet been
+ * processed.
*/
used = ctx->used;
/*
- * Make sure sufficient data is present -- note, the same check is
- * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
- * shall provide an information to the data sender that something is
- * wrong, but they are irrelevant to maintain the kernel integrity.
- * We need this check here too in case user space decides to not honor
- * the error message in sendmsg/sendpage and still call recvmsg. This
- * check here protects the kernel integrity.
+ * Make sure sufficient data is present -- note, the same check is
+ * also present in sendmsg. The checks in sendmsg shall inform the
+ * data sender that something is wrong, but they are irrelevant to
+ * maintaining kernel integrity. We need this check here too in case
+ * user space decides not to honor the error message in sendmsg and
+ * still calls recvmsg. This check here protects the kernel
+ * integrity.
*/
if (!aead_sufficient_data(sk))
return -EINVAL;
@@ -210,7 +186,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
*/
/* Use the RX SGL as source (and destination) for crypto op. */
- rsgl_src = areq->first_rsgl.sgl.sg;
+ rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
if (ctx->enc) {
/*
@@ -223,10 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* v v
* RX SGL: AAD || PT || Tag
*/
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, processed);
- if (err)
- goto free;
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
+ processed);
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
@@ -240,11 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || CT ----+
*/
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, outlen);
- if (err)
- goto free;
+ /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
@@ -267,10 +238,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
if (usedpages) {
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+ struct scatterlist *sg = sgl_prev->sgt.sgl;
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
- areq->tsgl);
+ sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
+ sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
rsgl_src = areq->tsgl;
@@ -278,7 +249,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
- areq->first_rsgl.sgl.sg, used, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
@@ -368,7 +339,6 @@ static struct proto_ops algif_aead_ops = {
.release = af_alg_release,
.sendmsg = aead_sendmsg,
- .sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
.poll = af_alg_poll,
};
@@ -378,7 +348,7 @@ static int aead_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct aead_tfm *tfm;
+ struct crypto_aead *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -392,7 +362,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
@@ -420,18 +390,6 @@ static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
return aead_sendmsg(sock, msg, size);
}
-static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = aead_check_key(sock);
- if (err)
- return err;
-
- return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -459,61 +417,28 @@ static struct proto_ops algif_aead_ops_nokey = {
.release = af_alg_release,
.sendmsg = aead_sendmsg_nokey,
- .sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll = af_alg_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
{
- struct aead_tfm *tfm;
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- aead = crypto_alloc_aead(name, type, mask);
- if (IS_ERR(aead)) {
- kfree(tfm);
- return ERR_CAST(aead);
- }
-
- null_tfm = crypto_get_default_null_skcipher();
- if (IS_ERR(null_tfm)) {
- crypto_free_aead(aead);
- kfree(tfm);
- return ERR_CAST(null_tfm);
- }
-
- tfm->aead = aead;
- tfm->null_tfm = null_tfm;
-
- return tfm;
+ return crypto_alloc_aead(name, type, mask);
}
static void aead_release(void *private)
{
- struct aead_tfm *tfm = private;
-
- crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher();
- kfree(tfm);
+ crypto_free_aead(private);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setauthsize(tfm->aead, authsize);
+ return crypto_aead_setauthsize(private, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setkey(tfm->aead, key, keylen);
+ return crypto_aead_setkey(private, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -522,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
@@ -536,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct aead_tfm *tfm = private;
- struct crypto_aead *aead = tfm->aead;
+ struct crypto_aead *tfm = private;
unsigned int len = sizeof(*ctx);
- unsigned int ivlen = crypto_aead_ivsize(aead);
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -566,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
static int aead_accept_parent(void *private, struct sock *sk)
{
- struct aead_tfm *tfm = private;
+ struct crypto_aead *tfm = private;
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 1d017ec5c63c..4d3dfc60a16a 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -63,122 +63,117 @@ static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
- int limit = ALG_MAX_PAGES * PAGE_SIZE;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- long copied = 0;
+ ssize_t copied = 0;
+ size_t len, max_pages, npages;
+ bool continuing, need_init = false;
int err;
- if (limit > sk->sk_sndbuf)
- limit = sk->sk_sndbuf;
+ max_pages = min_t(size_t, ALG_MAX_PAGES,
+ DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
lock_sock(sk);
- if (!ctx->more) {
- if ((msg->msg_flags & MSG_MORE))
- hash_free_result(sk, ctx);
+ continuing = ctx->more;
- err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
- if (err)
- goto unlock;
- }
-
- ctx->more = false;
-
- while (msg_data_left(msg)) {
- int len = msg_data_left(msg);
-
- if (len > limit)
- len = limit;
-
- len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
- if (len < 0) {
- err = copied ? 0 : len;
- goto unlock;
- }
-
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
-
- err = crypto_wait_req(crypto_ahash_update(&ctx->req),
- &ctx->wait);
- af_alg_free_sg(&ctx->sgl);
- if (err) {
- iov_iter_revert(&msg->msg_iter, len);
- goto unlock;
+ if (!continuing) {
+ /* Discard a previous request that wasn't marked MSG_MORE. */
+ hash_free_result(sk, ctx);
+ if (!msg_data_left(msg))
+ goto done; /* Zero-length; don't start new req */
+ need_init = true;
+ } else if (!msg_data_left(msg)) {
+ /*
+ * No data: unless MSG_MORE is set again, finalise the previous
+ * request (which was sent with MSG_MORE) so any error comes out
+ * here.
+ */
+ if (!(msg->msg_flags & MSG_MORE)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock_free_result;
+ ahash_request_set_crypt(&ctx->req, NULL,
+ ctx->result, 0);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
+ if (err)
+ goto unlock_free_result;
}
-
- copied += len;
+ goto done_more;
}
- err = 0;
-
- ctx->more = msg->msg_flags & MSG_MORE;
- if (!ctx->more) {
- err = hash_alloc_result(sk, ctx);
- if (err)
- goto unlock;
-
- ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- err = crypto_wait_req(crypto_ahash_final(&ctx->req),
- &ctx->wait);
- }
+ while (msg_data_left(msg)) {
+ ctx->sgl.sgt.sgl = ctx->sgl.sgl;
+ ctx->sgl.sgt.nents = 0;
+ ctx->sgl.sgt.orig_nents = 0;
-unlock:
- release_sock(sk);
+ err = -EIO;
+ npages = iov_iter_npages(&msg->msg_iter, max_pages);
+ if (npages == 0)
+ goto unlock_free;
- return err ?: copied;
-}
+ sg_init_table(ctx->sgl.sgl, npages);
-static ssize_t hash_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct hash_ctx *ctx = ask->private;
- int err;
+ ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
+ err = extract_iter_to_sg(&msg->msg_iter, LONG_MAX,
+ &ctx->sgl.sgt, npages, 0);
+ if (err < 0)
+ goto unlock_free;
+ len = err;
+ sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1);
- lock_sock(sk);
- sg_init_table(ctx->sgl.sg, 1);
- sg_set_page(ctx->sgl.sg, page, size, offset);
-
- if (!(flags & MSG_MORE)) {
- err = hash_alloc_result(sk, ctx);
- if (err)
- goto unlock;
- } else if (!ctx->more)
- hash_free_result(sk, ctx);
+ if (!msg_data_left(msg)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock_free;
+ }
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl,
+ ctx->result, len);
- if (!(flags & MSG_MORE)) {
- if (ctx->more)
- err = crypto_ahash_finup(&ctx->req);
- else
+ if (!msg_data_left(msg) && !continuing &&
+ !(msg->msg_flags & MSG_MORE)) {
err = crypto_ahash_digest(&ctx->req);
- } else {
- if (!ctx->more) {
- err = crypto_ahash_init(&ctx->req);
- err = crypto_wait_req(err, &ctx->wait);
- if (err)
- goto unlock;
+ } else {
+ if (need_init) {
+ err = crypto_wait_req(
+ crypto_ahash_init(&ctx->req),
+ &ctx->wait);
+ if (err)
+ goto unlock_free;
+ need_init = false;
+ }
+
+ if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE))
+ err = crypto_ahash_update(&ctx->req);
+ else
+ err = crypto_ahash_finup(&ctx->req);
+ continuing = true;
}
- err = crypto_ahash_update(&ctx->req);
- }
-
- err = crypto_wait_req(err, &ctx->wait);
- if (err)
- goto unlock;
+ err = crypto_wait_req(err, &ctx->wait);
+ if (err)
+ goto unlock_free;
- ctx->more = flags & MSG_MORE;
+ copied += len;
+ af_alg_free_sg(&ctx->sgl);
+ }
+done_more:
+ ctx->more = msg->msg_flags & MSG_MORE;
+done:
+ err = 0;
unlock:
release_sock(sk);
+ return copied ?: err;
- return err ?: size;
+unlock_free:
+ af_alg_free_sg(&ctx->sgl);
+unlock_free_result:
+ hash_free_result(sk, ctx);
+ ctx->more = false;
+ goto unlock;
}
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
@@ -228,31 +223,38 @@ unlock:
return err ?: len;
}
-static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int hash_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[HASH_MAX_STATESIZE];
+ struct crypto_ahash *tfm;
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
+ char *state;
bool more;
int err;
+ tfm = crypto_ahash_reqtfm(req);
+ state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!state)
+ goto out;
+
lock_sock(sk);
more = ctx->more;
err = more ? crypto_ahash_export(req, state) : 0;
release_sock(sk);
if (err)
- return err;
+ goto out_free_state;
- err = af_alg_accept(ask->parent, newsock, kern);
+ err = af_alg_accept(ask->parent, newsock, arg);
if (err)
- return err;
+ goto out_free_state;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
@@ -260,14 +262,14 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
ctx2->more = more;
if (!more)
- return err;
+ goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
- if (err) {
- sock_orphan(sk2);
- sock_put(sk2);
- }
+out_free_state:
+ kfree_sensitive(state);
+
+out:
return err;
}
@@ -285,7 +287,6 @@ static struct proto_ops algif_hash_ops = {
.release = af_alg_release,
.sendmsg = hash_sendmsg,
- .sendpage = hash_sendpage,
.recvmsg = hash_recvmsg,
.accept = hash_accept,
};
@@ -337,18 +338,6 @@ static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
return hash_sendmsg(sock, msg, size);
}
-static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = hash_check_key(sock);
- if (err)
- return err;
-
- return hash_sendpage(sock, page, offset, size, flags);
-}
-
static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -362,7 +351,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
}
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
int err;
@@ -370,7 +359,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
if (err)
return err;
- return hash_accept(sock, newsock, flags, kern);
+ return hash_accept(sock, newsock, arg);
}
static struct proto_ops algif_hash_ops_nokey = {
@@ -387,7 +376,6 @@ static struct proto_ops algif_hash_ops_nokey = {
.release = af_alg_release,
.sendmsg = hash_sendmsg_nokey,
- .sendpage = hash_sendpage_nokey,
.recvmsg = hash_recvmsg_nokey,
.accept = hash_accept_nokey,
};
@@ -428,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
- ctx->result = NULL;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
@@ -479,4 +466,5 @@ static void __exit algif_hash_exit(void)
module_init(algif_hash_init);
module_exit(algif_hash_exit);
+MODULE_DESCRIPTION("Userspace interface for hash algorithms");
MODULE_LICENSE("GPL");
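
For reference, a hypothetical user-space sketch of the sendmsg path rewritten above: an AF_ALG hash socket streaming two chunks, the first with MSG_MORE (crypto_ahash_update) and the second without (crypto_ahash_finup), then reading the digest back. Error handling is omitted for brevity.

#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, NULL);

	send(opfd, "hello ", 6, MSG_MORE);	/* update */
	send(opfd, "world", 5, 0);		/* finup */
	read(opfd, digest, sizeof(digest));	/* fetch digest */

	close(opfd);
	close(tfmfd);
	return 0;
}
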
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 407408c43730..1a86e40c8372 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -174,7 +174,6 @@ static struct proto_ops algif_rng_ops = {
.bind = sock_no_bind,
.accept = sock_no_accept,
.sendmsg = sock_no_sendmsg,
- .sendpage = sock_no_sendpage,
.release = af_alg_release,
.recvmsg = rng_recvmsg,
@@ -192,7 +191,6 @@ static struct proto_ops __maybe_unused algif_rng_test_ops = {
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
- .sendpage = sock_no_sendpage,
.release = af_alg_release,
.recvmsg = rng_test_recvmsg,
@@ -250,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->addtl = NULL;
- ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ee8890ee8f33..125d395c5e00 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -9,10 +9,10 @@
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage/sendmsg. Filling
- * up the TX SGL does not cause a crypto operation -- the data will only be
- * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
- * provide a buffer which is tracked with the RX SGL.
+ * filled by user space with the data submitted via sendmsg. Filling up the TX
+ * SGL does not cause a crypto operation -- the data will only be tracked by
+ * the kernel. Upon receipt of one recvmsg call, the caller must provide a
+ * buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
@@ -47,6 +47,52 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
return af_alg_sendmsg(sock, msg, size, ivsize);
}
+static int algif_skcipher_export(struct sock *sk, struct skcipher_request *req)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct crypto_skcipher *tfm;
+ struct af_alg_ctx *ctx;
+ struct alg_sock *pask;
+ unsigned statesize;
+ struct sock *psk;
+ int err;
+
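+ /*
+ * Only requests that were truncated to a block boundary (marked
+ * CRYPTO_SKCIPHER_REQ_NOTFINAL in _skcipher_recvmsg() below) carry
+ * state that a follow-up request needs to import.
+ */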
+ if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
+ return 0;
+
+ ctx = ask->private;
+ psk = ask->parent;
+ pask = alg_sk(psk);
+ tfm = pask->private;
+
+ statesize = crypto_skcipher_statesize(tfm);
+ ctx->state = sock_kmalloc(sk, statesize, GFP_ATOMIC);
+ if (!ctx->state)
+ return -ENOMEM;
+
+ err = crypto_skcipher_export(req, ctx->state);
+ if (err) {
+ sock_kzfree_s(sk, ctx->state, statesize);
+ ctx->state = NULL;
+ }
+
+ return err;
+}
+
+static void algif_skcipher_done(void *data, int err)
+{
+ struct af_alg_async_req *areq = data;
+ struct sock *sk = areq->sk;
+
+ if (err)
+ goto out;
+
+ err = algif_skcipher_export(sk, &areq->cra_u.skcipher_req);
+
+out:
+ af_alg_async_cb(data, err);
+}
+
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -58,6 +104,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
struct crypto_skcipher *tfm = pask->private;
unsigned int bs = crypto_skcipher_chunksize(tfm);
struct af_alg_async_req *areq;
+ unsigned cflags = 0;
int err = 0;
size_t len = 0;
@@ -82,8 +129,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
* If more buffers are to be expected to be processed, process only
* full block size buffers.
*/
- if (ctx->more || len < ctx->used)
+ if (ctx->more || len < ctx->used) {
len -= len % bs;
+ cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
+ }
/*
* Create a per request TX SGL for this request which tracks the
@@ -105,7 +154,17 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
- areq->first_rsgl.sgl.sg, len, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
+
+ if (ctx->state) {
+ err = crypto_skcipher_import(&areq->cra_u.skcipher_req,
+ ctx->state);
+ sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
+ ctx->state = NULL;
+ if (err)
+ goto free;
+ cflags |= CRYPTO_SKCIPHER_REQ_CONT;
+ }
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
@@ -116,8 +175,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
areq->outlen = len;
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ cflags |
CRYPTO_TFM_REQ_MAY_SLEEP,
- af_alg_async_cb, areq);
+ algif_skcipher_done, areq);
err = ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
@@ -130,6 +190,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
} else {
/* Synchronous operation */
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ cflags |
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
@@ -137,8 +198,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
&ctx->wait);
- }
+ if (!err)
+ err = algif_skcipher_export(
+ sk, &areq->cra_u.skcipher_req);
+ }
free:
af_alg_free_resources(areq);
@@ -194,7 +258,6 @@ static struct proto_ops algif_skcipher_ops = {
.release = af_alg_release,
.sendmsg = skcipher_sendmsg,
- .sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
.poll = af_alg_poll,
};
@@ -246,18 +309,6 @@ static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
return skcipher_sendmsg(sock, msg, size);
}
-static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = skcipher_check_key(sock);
- if (err)
- return err;
-
- return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -285,7 +336,6 @@ static struct proto_ops algif_skcipher_ops_nokey = {
.release = af_alg_release,
.sendmsg = skcipher_sendmsg_nokey,
- .sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
.poll = af_alg_poll,
};
@@ -315,6 +365,8 @@ static void skcipher_sock_destruct(struct sock *sk)
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
+ if (ctx->state)
+ sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@@ -385,4 +437,5 @@ static void __exit algif_skcipher_exit(void)
module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
+MODULE_DESCRIPTION("Userspace interface for skcipher algorithms");
MODULE_LICENSE("GPL");
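
The state export/import added in this file exists so that one logical cipher
operation can span several recvmsg() calls: a non-final chunk
(CRYPTO_SKCIPHER_REQ_NOTFINAL) has its state exported, and the continuation
imports it again under CRYPTO_SKCIPHER_REQ_CONT. From user space the split is
requested with MSG_MORE; the fragment below is a hedged sketch of that
pattern, assuming the opfd setup (key and ALG_SET_OP already issued) from the
sketch earlier, with part1/part2/out as placeholder buffers:

/* Hedged sketch: one encryption split across two recvmsg() calls. */
char part1[16], part2[16], out[32];

send(opfd, part1, sizeof(part1), MSG_MORE); /* more data to follow */
read(opfd, out, sizeof(part1));             /* non-final: kernel exports state */

send(opfd, part2, sizeof(part2), 0);        /* final chunk */
read(opfd, out + 16, sizeof(part2));        /* state imported, op completes */
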
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
deleted file mode 100644
index 3f512efaba3a..000000000000
--- a/crypto/ansi_cprng.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PRNG: Pseudo Random Number Generator
- * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
- * AES 128 cipher
- *
- * (C) Neil Horman <nhorman@tuxdriver.com>
- */
-
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/rng.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-
-#define DEFAULT_PRNG_KEY "0123456789abcdef"
-#define DEFAULT_PRNG_KSZ 16
-#define DEFAULT_BLK_SZ 16
-#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
-
-/*
- * Flags for the prng_context flags field
- */
-
-#define PRNG_FIXED_SIZE 0x1
-#define PRNG_NEED_RESET 0x2
-
-/*
- * Note: DT is our counter value
- * I is our intermediate value
- * V is our seed vector
- * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
- * for implementation details
- */
-
-
-struct prng_context {
- spinlock_t prng_lock;
- unsigned char rand_data[DEFAULT_BLK_SZ];
- unsigned char last_rand_data[DEFAULT_BLK_SZ];
- unsigned char DT[DEFAULT_BLK_SZ];
- unsigned char I[DEFAULT_BLK_SZ];
- unsigned char V[DEFAULT_BLK_SZ];
- u32 rand_data_valid;
- struct crypto_cipher *tfm;
- u32 flags;
-};
-
-static int dbg;
-
-static void hexdump(char *note, unsigned char *buf, unsigned int len)
-{
- if (dbg) {
- printk(KERN_CRIT "%s", note);
- print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
- 16, 1,
- buf, len, false);
- }
-}
-
-#define dbgprint(format, args...) do {\
-if (dbg)\
- printk(format, ##args);\
-} while (0)
-
-static void xor_vectors(unsigned char *in1, unsigned char *in2,
- unsigned char *out, unsigned int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- out[i] = in1[i] ^ in2[i];
-
-}
-/*
- * Generates DEFAULT_BLK_SZ bytes of random data per call.
- * Returns 0 if generation succeeded, <0 if something went wrong.
- */
-static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
-{
- int i;
- unsigned char tmp[DEFAULT_BLK_SZ];
- unsigned char *output = NULL;
-
-
- dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
- ctx);
-
- hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
- hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
- hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
-
- /*
- * This algorithm is a 3 stage state machine
- */
- for (i = 0; i < 3; i++) {
-
- switch (i) {
- case 0:
- /*
- * Start by encrypting the counter value
- * This gives us an intermediate value I
- */
- memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
- output = ctx->I;
- hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
- break;
- case 1:
-
- /*
- * Next xor I with our secret vector V
- * encrypt that result to obtain our
- * pseudo random data which we output
- */
- xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
- hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
- output = ctx->rand_data;
- break;
- case 2:
- /*
- * First check that we didn't produce the same
- * random data as we did on the last pass through this loop
- */
- if (!memcmp(ctx->rand_data, ctx->last_rand_data,
- DEFAULT_BLK_SZ)) {
- if (cont_test) {
- panic("cprng %p Failed repetition check!\n",
- ctx);
- }
-
- printk(KERN_ERR
- "ctx %p Failed repetition check!\n",
- ctx);
-
- ctx->flags |= PRNG_NEED_RESET;
- return -EINVAL;
- }
- memcpy(ctx->last_rand_data, ctx->rand_data,
- DEFAULT_BLK_SZ);
-
- /*
- * Lastly xor the random data with I
- * and encrypt that to obtain a new secret vector V
- */
- xor_vectors(ctx->rand_data, ctx->I, tmp,
- DEFAULT_BLK_SZ);
- output = ctx->V;
- hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
- break;
- }
-
-
- /* do the encryption */
- crypto_cipher_encrypt_one(ctx->tfm, output, tmp);
-
- }
-
- /*
- * Now update our DT value
- */
- for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
- ctx->DT[i] += 1;
- if (ctx->DT[i] != 0)
- break;
- }
-
- dbgprint("Returning new block for context %p\n", ctx);
- ctx->rand_data_valid = 0;
-
- hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
- hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
- hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
- hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
-
- return 0;
-}
-
-/* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
- int do_cont_test)
-{
- unsigned char *ptr = buf;
- unsigned int byte_count = (unsigned int)nbytes;
- int err;
-
-
- spin_lock_bh(&ctx->prng_lock);
-
- err = -EINVAL;
- if (ctx->flags & PRNG_NEED_RESET)
- goto done;
-
- /*
- * If the FIXED_SIZE flag is on, only return whole blocks of
- * pseudo random data
- */
- err = -EINVAL;
- if (ctx->flags & PRNG_FIXED_SIZE) {
- if (nbytes < DEFAULT_BLK_SZ)
- goto done;
- byte_count = DEFAULT_BLK_SZ;
- }
-
- /*
- * Return 0 in case of success as mandated by the kernel
- * crypto API interface definition.
- */
- err = 0;
-
- dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
- byte_count, ctx);
-
-
-remainder:
- if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
- memset(buf, 0, nbytes);
- err = -EINVAL;
- goto done;
- }
- }
-
- /*
- * Copy any data less than an entire block
- */
- if (byte_count < DEFAULT_BLK_SZ) {
-empty_rbuf:
- while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
- *ptr = ctx->rand_data[ctx->rand_data_valid];
- ptr++;
- byte_count--;
- ctx->rand_data_valid++;
- if (byte_count == 0)
- goto done;
- }
- }
-
- /*
- * Now copy whole blocks
- */
- for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
- if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
- memset(buf, 0, nbytes);
- err = -EINVAL;
- goto done;
- }
- }
- if (ctx->rand_data_valid > 0)
- goto empty_rbuf;
- memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
- ctx->rand_data_valid += DEFAULT_BLK_SZ;
- ptr += DEFAULT_BLK_SZ;
- }
-
- /*
- * Now go back and get any remaining partial block
- */
- if (byte_count)
- goto remainder;
-
-done:
- spin_unlock_bh(&ctx->prng_lock);
- dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
- err, ctx);
- return err;
-}
-
-static void free_prng_context(struct prng_context *ctx)
-{
- crypto_free_cipher(ctx->tfm);
-}
-
-static int reset_prng_context(struct prng_context *ctx,
- const unsigned char *key, size_t klen,
- const unsigned char *V, const unsigned char *DT)
-{
- int ret;
- const unsigned char *prng_key;
-
- spin_lock_bh(&ctx->prng_lock);
- ctx->flags |= PRNG_NEED_RESET;
-
- prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
-
- if (!key)
- klen = DEFAULT_PRNG_KSZ;
-
- if (V)
- memcpy(ctx->V, V, DEFAULT_BLK_SZ);
- else
- memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
-
- if (DT)
- memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
- else
- memset(ctx->DT, 0, DEFAULT_BLK_SZ);
-
- memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
- memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
-
- ctx->rand_data_valid = DEFAULT_BLK_SZ;
-
- ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
- if (ret) {
- dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
- crypto_cipher_get_flags(ctx->tfm));
- goto out;
- }
-
- ret = 0;
- ctx->flags &= ~PRNG_NEED_RESET;
-out:
- spin_unlock_bh(&ctx->prng_lock);
- return ret;
-}
-
-static int cprng_init(struct crypto_tfm *tfm)
-{
- struct prng_context *ctx = crypto_tfm_ctx(tfm);
-
- spin_lock_init(&ctx->prng_lock);
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- return PTR_ERR(ctx->tfm);
- }
-
- if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
- return -EINVAL;
-
- /*
- * after allocation, we should always force the user to reset
- * so they don't inadvertently use the insecure default values
- * without specifying them intentionally
- */
- ctx->flags |= PRNG_NEED_RESET;
- return 0;
-}
-
-static void cprng_exit(struct crypto_tfm *tfm)
-{
- free_prng_context(crypto_tfm_ctx(tfm));
-}
-
-static int cprng_get_random(struct crypto_rng *tfm,
- const u8 *src, unsigned int slen,
- u8 *rdata, unsigned int dlen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- return get_prng_bytes(rdata, dlen, prng, 0);
-}
-
-/*
- * This is the cprng_registered reset method; the seed value is
- * interpreted as the tuple { V KEY DT }.
- * V and KEY are required during reset, while DT is optional and is
- * detected as present by testing the length of the seed.
- */
-static int cprng_reset(struct crypto_rng *tfm,
- const u8 *seed, unsigned int slen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
- const u8 *key = seed + DEFAULT_BLK_SZ;
- const u8 *dt = NULL;
-
- if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
- return -EINVAL;
-
- if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
- dt = key + DEFAULT_PRNG_KSZ;
-
- reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt);
-
- if (prng->flags & PRNG_NEED_RESET)
- return -EINVAL;
- return 0;
-}
-
-#ifdef CONFIG_CRYPTO_FIPS
-static int fips_cprng_get_random(struct crypto_rng *tfm,
- const u8 *src, unsigned int slen,
- u8 *rdata, unsigned int dlen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- return get_prng_bytes(rdata, dlen, prng, 1);
-}
-
-static int fips_cprng_reset(struct crypto_rng *tfm,
- const u8 *seed, unsigned int slen)
-{
- u8 rdata[DEFAULT_BLK_SZ];
- const u8 *key = seed + DEFAULT_BLK_SZ;
- int rc;
-
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
- return -EINVAL;
-
- /* fips strictly requires seed != key */
- if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
- return -EINVAL;
-
- rc = cprng_reset(tfm, seed, slen);
-
- if (!rc)
- goto out;
-
- /* this primes our continuity test */
- rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
- prng->rand_data_valid = DEFAULT_BLK_SZ;
-
-out:
- return rc;
-}
-#endif
-
-static struct rng_alg rng_algs[] = { {
- .generate = cprng_get_random,
- .seed = cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
- .base = {
- .cra_name = "stdrng",
- .cra_driver_name = "ansi_cprng",
- .cra_priority = 100,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- }
-#ifdef CONFIG_CRYPTO_FIPS
-}, {
- .generate = fips_cprng_get_random,
- .seed = fips_cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
- .base = {
- .cra_name = "fips(ansi_cprng)",
- .cra_driver_name = "fips_ansi_cprng",
- .cra_priority = 300,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- }
-#endif
-} };
-
-/* Module initialization */
-static int __init prng_mod_init(void)
-{
- return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
-}
-
-static void __exit prng_mod_fini(void)
-{
- crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
-}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
-MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
-module_param(dbg, int, 0);
-MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-subsys_initcall(prng_mod_init);
-module_exit(prng_mod_fini);
-MODULE_ALIAS_CRYPTO("stdrng");
-MODULE_ALIAS_CRYPTO("ansi_cprng");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
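
For the record, the generator removed above implemented ANSI X9.31
Appendix A.2.4: the 3-stage state machine in _get_more_prng_bytes() computes
I = E_K(DT), R = E_K(I ^ V) and V = E_K(R ^ I), then increments the counter
DT. Below is a compact, hedged sketch of one generation step; the
aes_encrypt_block() helper is a hypothetical stand-in for
crypto_cipher_encrypt_one() and is assumed to be provided elsewhere.

/* Hedged sketch of one X9.31 A.2.4 step. */
#include <stdint.h>

#define BLK 16

/* Hypothetical keyed AES-128 single-block encryption, assumed given. */
void aes_encrypt_block(const uint8_t key[BLK], const uint8_t in[BLK],
		       uint8_t out[BLK]);

static void xor_block(const uint8_t *a, const uint8_t *b, uint8_t *out)
{
	for (int i = 0; i < BLK; i++)
		out[i] = a[i] ^ b[i];
}

/* DT: counter, V: seed vector, R: output block; DT and V updated in place. */
void x931_step(const uint8_t key[BLK], uint8_t DT[BLK],
	       uint8_t V[BLK], uint8_t R[BLK])
{
	uint8_t I[BLK], tmp[BLK];

	aes_encrypt_block(key, DT, I);     /* stage 0: I = E_K(DT)     */
	xor_block(I, V, tmp);
	aes_encrypt_block(key, tmp, R);    /* stage 1: R = E_K(I ^ V)  */
	xor_block(R, I, tmp);
	aes_encrypt_block(key, tmp, V);    /* stage 2: V = E_K(R ^ I)  */

	for (int i = BLK - 1; i >= 0; i--) /* increment the counter DT */
		if (++DT[i])
			break;
}
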
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 5da0241ef453..4b01b6ec961a 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -29,11 +29,11 @@
*
*/
+#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
-#include <linux/crypto.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
@@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
@@ -482,7 +481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
/* * map cipher key to initial key state (mu): */
for (i = 0; i < N; i++)
- kappa[i] = be32_to_cpu(key[i]);
+ kappa[i] = get_unaligned_be32(&in_key[4 * i]);
/*
* generate R + 1 round keys:
@@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
- u8 *ciphertext, const u8 *plaintext, const int R)
+ u8 *dst, const u8 *src, const int R)
{
- const __be32 *src = (const __be32 *)plaintext;
- __be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4];
u32 inter[4];
@@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* and add initial round key (sigma[K^0]):
*/
for (i = 0; i < 4; i++)
- state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
+ state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
@@ -654,7 +651,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
*/
for (i = 0; i < 4; i++)
- dst[i] = cpu_to_be32(inter[i]);
+ put_unaligned_be32(inter[i], &dst[4 * i]);
}
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
@@ -687,10 +683,7 @@ static struct crypto_alg anubis_alg = {
static int __init anubis_mod_init(void)
{
- int ret = 0;
-
- ret = crypto_register_alg(&anubis_alg);
- return ret;
+ return crypto_register_alg(&anubis_alg);
}
static void __exit anubis_mod_fini(void)
@@ -698,7 +691,7 @@ static void __exit anubis_mod_fini(void)
crypto_unregister_alg(&anubis_alg);
}
-subsys_initcall(anubis_mod_init);
+module_init(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
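
The anubis conversion follows a recurring pattern in this series: rather than
casting the I/O buffers to __be32 (which forces the core to honour
cra_alignmask = 3), the cipher loads and stores through the unaligned
helpers, after which the alignmask can be dropped. A minimal hedged sketch
contrasting the two styles:

/* Hedged sketch: aligned casts vs. the unaligned helpers. */
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/unaligned.h>

/* Old style: only safe when 'buf' is 4-byte aligned, which the core
 * had to guarantee via cra_alignmask = 3 (possibly bounce-buffering).
 */
static inline u32 load_word_aligned(const u8 *buf)
{
	return be32_to_cpu(*(const __be32 *)buf);
}

/* New style: correct for any alignment, so cra_alignmask can go. */
static inline u32 load_word_any(const u8 *buf)
{
	return get_unaligned_be32(buf);
}
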
diff --git a/crypto/api.c b/crypto/api.c
index 64f2d365a8e9..5724d62e9d07 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -31,10 +31,14 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
-DEFINE_STATIC_KEY_FALSE(crypto_boot_test_finished);
-EXPORT_SYMBOL_GPL(crypto_boot_test_finished);
+#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
+DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
+#endif
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ u32 type, u32 mask);
+static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
+ u32 mask);
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
@@ -66,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
if ((q->cra_flags ^ type) & mask)
continue;
- if (crypto_is_larval(q) &&
- !crypto_is_test_larval((struct crypto_larval *)q) &&
- ((struct crypto_larval *)q)->mask != mask)
- continue;
-
exact = !strcmp(q->cra_driver_name, name);
fuzzy = !strcmp(q->cra_name, name);
if (!exact && !(fuzzy && q->cra_priority > best))
@@ -109,6 +108,8 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
if (!larval)
return ERR_PTR(-ENOMEM);
+ type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK);
+
larval->mask = mask;
larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
larval->alg.cra_priority = -1;
@@ -144,41 +145,37 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (alg != &larval->alg) {
kfree(larval);
if (crypto_is_larval(alg))
- alg = crypto_larval_wait(alg);
+ alg = crypto_larval_wait(alg, type, mask);
}
return alg;
}
-void crypto_larval_kill(struct crypto_alg *alg)
+static void crypto_larval_kill(struct crypto_larval *larval)
{
- struct crypto_larval *larval = (void *)alg;
+ bool unlinked;
down_write(&crypto_alg_sem);
- list_del(&alg->cra_list);
+ unlinked = list_empty(&larval->alg.cra_list);
+ if (!unlinked)
+ list_del_init(&larval->alg.cra_list);
up_write(&crypto_alg_sem);
+
+ if (unlinked)
+ return;
+
complete_all(&larval->completion);
- crypto_alg_put(alg);
+ crypto_alg_put(&larval->alg);
}
-EXPORT_SYMBOL_GPL(crypto_larval_kill);
-void crypto_wait_for_test(struct crypto_larval *larval)
+void crypto_schedule_test(struct crypto_larval *larval)
{
int err;
err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
- if (WARN_ON_ONCE(err != NOTIFY_STOP))
- goto out;
-
- err = wait_for_completion_killable(&larval->completion);
- WARN_ON(err);
- if (!err)
- crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);
-
-out:
- crypto_larval_kill(&larval->alg);
+ WARN_ON_ONCE(err != NOTIFY_STOP);
}
-EXPORT_SYMBOL_GPL(crypto_wait_for_test);
+EXPORT_SYMBOL_GPL(crypto_schedule_test);
static void crypto_start_test(struct crypto_larval *larval)
{
@@ -197,28 +194,45 @@ static void crypto_start_test(struct crypto_larval *larval)
larval->test_started = true;
up_write(&crypto_alg_sem);
- crypto_wait_for_test(larval);
+ crypto_schedule_test(larval);
}
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ u32 type, u32 mask)
{
- struct crypto_larval *larval = (void *)alg;
- long timeout;
+ struct crypto_larval *larval;
+ long time_left;
+
+again:
+ larval = container_of(alg, struct crypto_larval, alg);
- if (!static_branch_likely(&crypto_boot_test_finished))
+ if (!crypto_boot_test_finished())
crypto_start_test(larval);
- timeout = wait_for_completion_killable_timeout(
+ time_left = wait_for_completion_killable_timeout(
&larval->completion, 60 * HZ);
alg = larval->adult;
- if (timeout < 0)
+ if (time_left < 0)
alg = ERR_PTR(-EINTR);
- else if (!timeout)
+ else if (!time_left) {
+ if (crypto_is_test_larval(larval))
+ crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
- else if (!alg)
- alg = ERR_PTR(-ENOENT);
- else if (IS_ERR(alg))
+ } else if (!alg || PTR_ERR(alg) == -EEXIST) {
+ int err = alg ? -EEXIST : -EAGAIN;
+
+ /*
+ * EEXIST is expected because two probes can be scheduled
+ * at the same time with one using alg_name and the other
+ * using driver_name. Do a re-lookup but do not retry in
+ * case we hit a quirk like gcm_base(ctr(aes),...) which
+ * will never match.
+ */
+ alg = &larval->alg;
+ alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
+ ERR_PTR(err);
+ } else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
!(alg->cra_flags & CRYPTO_ALG_TESTED))
@@ -229,6 +243,9 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
alg = ERR_PTR(-EAGAIN);
crypto_mod_put(&larval->alg);
+ if (!IS_ERR(alg) && crypto_is_larval(alg))
+ goto again;
+
return alg;
}
@@ -292,9 +309,13 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
}
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
- alg = crypto_larval_wait(alg);
- else if (!alg)
+ alg = crypto_larval_wait(alg, type, mask);
+ else if (alg)
+ ;
+ else if (!(mask & CRYPTO_ALG_TESTED))
alg = crypto_larval_add(name, type, mask);
+ else
+ alg = ERR_PTR(-ENOENT);
return alg;
}
@@ -336,25 +357,16 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
- alg = crypto_larval_wait(larval);
+ alg = crypto_larval_wait(larval, type, mask);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
- crypto_larval_kill(larval);
+ crypto_larval_kill(container_of(larval, struct crypto_larval, alg));
return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
-static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;
-
- if (type_obj)
- return type_obj->init(tfm, type, mask);
- return 0;
-}
-
static void crypto_exit_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
@@ -379,10 +391,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- len += crypto_compress_ctxsize(alg);
- break;
}
return len;
@@ -396,23 +404,20 @@ void crypto_shoot_alg(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
-struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
- u32 mask)
+struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
+ u32 mask, gfp_t gfp)
{
- struct crypto_tfm *tfm = NULL;
+ struct crypto_tfm *tfm;
unsigned int tfm_size;
int err = -ENOMEM;
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
- tfm = kzalloc(tfm_size, GFP_KERNEL);
+ tfm = kzalloc(tfm_size, gfp);
if (tfm == NULL)
goto out_err;
tfm->__crt_alg = alg;
-
- err = crypto_init_ops(tfm, type, mask);
- if (err)
- goto out_free_tfm;
+ refcount_set(&tfm->refcnt, 1);
if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
goto cra_init_failed;
@@ -421,7 +426,6 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
cra_init_failed:
crypto_exit_ops(tfm);
-out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(tfm);
@@ -430,6 +434,13 @@ out_err:
out:
return tfm;
}
+EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);
+
+struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
+ u32 mask)
+{
+ return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
/*
@@ -488,26 +499,44 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
-void *crypto_create_tfm_node(struct crypto_alg *alg,
- const struct crypto_type *frontend,
- int node)
+static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
+ const struct crypto_type *frontend, int node,
+ gfp_t gfp)
{
- char *mem;
- struct crypto_tfm *tfm = NULL;
+ struct crypto_tfm *tfm;
unsigned int tfmsize;
unsigned int total;
- int err = -ENOMEM;
+ char *mem;
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
- mem = kzalloc_node(total, GFP_KERNEL, node);
+ mem = kzalloc_node(total, gfp, node);
if (mem == NULL)
- goto out_err;
+ return ERR_PTR(-ENOMEM);
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
tfm->node = node;
+ refcount_set(&tfm->refcnt, 1);
+
+ return mem;
+}
+
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend,
+ int node)
+{
+ struct crypto_tfm *tfm;
+ char *mem;
+ int err;
+
+ mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
+ if (IS_ERR(mem))
+ goto out;
+
+ tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->fb = tfm;
err = frontend->init_tfm(tfm);
if (err)
@@ -524,13 +553,38 @@ out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(mem);
-out_err:
mem = ERR_PTR(err);
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+ struct crypto_tfm *otfm)
+{
+ struct crypto_alg *alg = otfm->__crt_alg;
+ struct crypto_tfm *tfm;
+ char *mem;
+
+ mem = ERR_PTR(-ESTALE);
+ if (unlikely(!crypto_mod_get(alg)))
+ goto out;
+
+ mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
+ if (IS_ERR(mem)) {
+ crypto_mod_put(alg);
+ goto out;
+ }
+
+ tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->crt_flags = otfm->crt_flags;
+ tfm->fb = tfm;
+
+out:
+ return mem;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_tfm);
+
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
@@ -620,6 +674,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
if (IS_ERR_OR_NULL(mem))
return;
+ if (!refcount_dec_and_test(&tfm->refcnt))
+ return;
alg = tfm->__crt_alg;
if (!tfm->exit && alg->cra_exit)
@@ -644,9 +700,9 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
-void crypto_req_done(struct crypto_async_request *req, int err)
+void crypto_req_done(void *data, int err)
{
- struct crypto_wait *wait = req->data;
+ struct crypto_wait *wait = data;
if (err == -EINPROGRESS)
return;
@@ -656,5 +712,31 @@ void crypto_req_done(struct crypto_async_request *req, int err)
}
EXPORT_SYMBOL_GPL(crypto_req_done);
+void crypto_destroy_alg(struct crypto_alg *alg)
+{
+ if (alg->cra_type && alg->cra_type->destroy)
+ alg->cra_type->destroy(alg);
+ if (alg->cra_destroy)
+ alg->cra_destroy(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_destroy_alg);
+
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct crypto_async_request *nreq;
+
+ nreq = kmemdup(req, total, gfp);
+ if (!nreq) {
+ req->tfm = tfm->fb;
+ return req;
+ }
+
+ nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(crypto_request_clone);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
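
The crypto_req_done() change above (the callback now receives the opaque data
pointer directly instead of the request) leaves the usual synchronous-wait
idiom intact. A hedged sketch of that idiom for an skcipher caller, with
error handling trimmed:

/* Hedged sketch: synchronous wait with crypto_req_done()/crypto_wait_req(). */
#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int encrypt_sync(struct crypto_skcipher *tfm,
			struct scatterlist *src, struct scatterlist *dst,
			unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}
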
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 3254dcc34368..1608018111d0 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -7,7 +7,6 @@
* Jon Oberheide <jon@oberheide.org>
*/
-#include <crypto/algapi.h>
#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
@@ -15,33 +14,31 @@
#include <linux/module.h>
#include <linux/sched.h>
-static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+#define ARC4_ALIGN __alignof__(struct arc4_ctx)
+
+static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm);
return arc4_setkey(ctx, in_key, key_len);
}
-static int crypto_arc4_crypt(struct skcipher_request *req)
+static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- int err;
+ struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm);
- err = skcipher_walk_virt(&walk, req, false);
+ if (!(flags & CRYPTO_LSKCIPHER_FLAG_CONT))
+ memcpy(siv, ctx, sizeof(*ctx));
- while (walk.nbytes > 0) {
- arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
+ ctx = (struct arc4_ctx *)siv;
- return err;
+ arc4_crypt(ctx, dst, src, nbytes);
+ return 0;
}
-static int crypto_arc4_init(struct crypto_skcipher *tfm)
+static int crypto_arc4_init(struct crypto_lskcipher *tfm)
{
pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n",
current->comm, (unsigned long)current->pid);
@@ -49,36 +46,34 @@ static int crypto_arc4_init(struct crypto_skcipher *tfm)
return 0;
}
-static struct skcipher_alg arc4_alg = {
- /*
- * For legacy reasons, this is named "ecb(arc4)", not "arc4".
- * Nevertheless it's actually a stream cipher, not a block cipher.
- */
- .base.cra_name = "ecb(arc4)",
- .base.cra_driver_name = "ecb(arc4)-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = ARC4_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct arc4_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .setkey = crypto_arc4_setkey,
- .encrypt = crypto_arc4_crypt,
- .decrypt = crypto_arc4_crypt,
- .init = crypto_arc4_init,
+static struct lskcipher_alg arc4_alg = {
+ .co.base.cra_name = "arc4",
+ .co.base.cra_driver_name = "arc4-generic",
+ .co.base.cra_priority = 100,
+ .co.base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .co.base.cra_ctxsize = sizeof(struct arc4_ctx),
+ .co.base.cra_alignmask = ARC4_ALIGN - 1,
+ .co.base.cra_module = THIS_MODULE,
+ .co.min_keysize = ARC4_MIN_KEY_SIZE,
+ .co.max_keysize = ARC4_MAX_KEY_SIZE,
+ .co.statesize = sizeof(struct arc4_ctx),
+ .setkey = crypto_arc4_setkey,
+ .encrypt = crypto_arc4_crypt,
+ .decrypt = crypto_arc4_crypt,
+ .init = crypto_arc4_init,
};
static int __init arc4_init(void)
{
- return crypto_register_skcipher(&arc4_alg);
+ return crypto_register_lskcipher(&arc4_alg);
}
static void __exit arc4_exit(void)
{
- crypto_unregister_skcipher(&arc4_alg);
+ crypto_unregister_lskcipher(&arc4_alg);
}
-subsys_initcall(arc4_init);
+module_init(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
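
The deleted comment was right that arc4 is a stream cipher: the whole
transform is a 256-byte permutation plus two indices, which is why it maps so
naturally onto the lskcipher model of state carried in the caller-supplied
siv buffer. As a self-contained refresher, here is the textbook algorithm
(a plain illustration only, not the kernel's lib/crypto implementation):

/* Hedged sketch: textbook RC4 (KSA + PRGA), for illustration only. */
#include <stddef.h>
#include <stdint.h>

struct rc4_state {
	uint8_t S[256];
	uint8_t i, j;
};

void rc4_setkey(struct rc4_state *st, const uint8_t *key, size_t klen)
{
	unsigned int i, j = 0;

	for (i = 0; i < 256; i++)
		st->S[i] = i;
	for (i = 0; i < 256; i++) {      /* key-scheduling algorithm */
		uint8_t t = st->S[i];

		j = (j + t + key[i % klen]) & 0xff;
		st->S[i] = st->S[j];
		st->S[j] = t;
	}
	st->i = st->j = 0;
}

void rc4_crypt(struct rc4_state *st, uint8_t *dst, const uint8_t *src,
	       size_t len)
{
	uint8_t i = st->i, j = st->j;    /* uint8_t wraps mod 256 */

	while (len--) {                  /* pseudo-random generation */
		uint8_t si, sj;

		i++;
		si = st->S[i];
		j += si;
		sj = st->S[j];
		st->S[i] = sj;           /* swap S[i] and S[j] */
		st->S[j] = si;
		*dst++ = *src++ ^ st->S[(si + sj) & 0xff];
	}
	st->i = i;
	st->j = j;
}
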
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index 4cc29b82b99d..faa7900383f6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -15,6 +15,7 @@
*/
#include <crypto/aria.h>
+#include <linux/unaligned.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
@@ -27,7 +28,6 @@ static const u32 key_rc[20] = {
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
- const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
@@ -35,10 +35,10 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
ck = &key_rc[(key_len - 16) / 2];
- w0[0] = be32_to_cpu(key[0]);
- w0[1] = be32_to_cpu(key[1]);
- w0[2] = be32_to_cpu(key[2]);
- w0[3] = be32_to_cpu(key[3]);
+ w0[0] = get_unaligned_be32(&in_key[0]);
+ w0[1] = get_unaligned_be32(&in_key[4]);
+ w0[2] = get_unaligned_be32(&in_key[8]);
+ w0[3] = get_unaligned_be32(&in_key[12]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
@@ -48,11 +48,11 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
- w1[0] = be32_to_cpu(key[4]);
- w1[1] = be32_to_cpu(key[5]);
+ w1[0] = get_unaligned_be32(&in_key[16]);
+ w1[1] = get_unaligned_be32(&in_key[20]);
if (key_len > 24) {
- w1[2] = be32_to_cpu(key[6]);
- w1[3] = be32_to_cpu(key[7]);
+ w1[2] = get_unaligned_be32(&in_key[24]);
+ w1[3] = get_unaligned_be32(&in_key[28]);
} else {
w1[2] = 0;
w1[3] = 0;
@@ -178,6 +178,10 @@ int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
+ BUILD_BUG_ON(sizeof(ctx->enc_key) != 272);
+ BUILD_BUG_ON(sizeof(ctx->dec_key) != 272);
+ BUILD_BUG_ON(sizeof(int) != sizeof(ctx->rounds));
+
ctx->key_length = key_len;
ctx->rounds = (key_len + 32) / 4;
@@ -191,17 +195,15 @@ EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
- reg0 = be32_to_cpu(src[0]);
- reg1 = be32_to_cpu(src[1]);
- reg2 = be32_to_cpu(src[2]);
- reg3 = be32_to_cpu(src[3]);
+ reg0 = get_unaligned_be32(&in[0]);
+ reg1 = get_unaligned_be32(&in[4]);
+ reg2 = get_unaligned_be32(&in[8]);
+ reg3 = get_unaligned_be32(&in[12]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
@@ -237,10 +239,10 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
- dst[0] = cpu_to_be32(reg0);
- dst[1] = cpu_to_be32(reg1);
- dst[2] = cpu_to_be32(reg2);
- dst[3] = cpu_to_be32(reg3);
+ put_unaligned_be32(reg0, &out[0]);
+ put_unaligned_be32(reg1, &out[4]);
+ put_unaligned_be32(reg2, &out[8]);
+ put_unaligned_be32(reg3, &out[12]);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
@@ -280,7 +282,6 @@ static struct crypto_alg aria_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -303,7 +304,7 @@ static void __exit aria_fini(void)
crypto_unregister_alg(&aria_alg);
}
-subsys_initcall(aria_init);
+module_init(aria_init);
module_exit(aria_fini);
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 3df3fe4ed95f..e1345b8f39f1 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB
select CRYPTO_HASH_INFO
select CRYPTO_AKCIPHER
+ select CRYPTO_SIG
select CRYPTO_HASH
help
This option provides support for asymmetric public key type handling.
@@ -76,13 +77,30 @@ config SIGNED_PE_FILE_VERIFICATION
signed PE binary.
config FIPS_SIGNATURE_SELFTEST
- bool "Run FIPS selftests on the X.509+PKCS7 signature verification"
+ tristate "Run FIPS selftests on the X.509+PKCS7 signature verification"
help
This option causes some selftests to be run on the signature
	  verification code, using some built-in data. This is required
for FIPS.
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
- depends on PKCS7_MESSAGE_PARSER
+ depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
+ depends on X509_CERTIFICATE_PARSER
+ depends on CRYPTO_RSA
+ depends on CRYPTO_SHA256
+
+config FIPS_SIGNATURE_SELFTEST_RSA
+ bool
+ default y
+ depends on FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_RSA=y || CRYPTO_RSA=FIPS_SIGNATURE_SELFTEST
+
+config FIPS_SIGNATURE_SELFTEST_ECDSA
+ bool
+ default y
+ depends on FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_ECDSA=y || CRYPTO_ECDSA=FIPS_SIGNATURE_SELFTEST
endif # ASYMMETRIC_KEY_TYPE
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index 0d1fa1b692c6..bc65d3b98dcb 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -22,7 +22,10 @@ x509_key_parser-y := \
x509_cert_parser.o \
x509_loader.o \
x509_public_key.o
-x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o
+obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o
+x509_selftest-y += selftest.o
+x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_RSA) += selftest_rsa.o
+x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA) += selftest_ecdsa.o
$(obj)/x509_cert_parser.o: \
$(obj)/x509.asn1.h \
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 41a2f0eb4ce4..348966ea2175 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -11,23 +11,13 @@
#include <crypto/public_key.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
#include <keys/user-type.h>
#include "asymmetric_keys.h"
-MODULE_LICENSE("GPL");
-
-const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = {
- [VERIFYING_MODULE_SIGNATURE] = "mod sig",
- [VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig",
- [VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig",
- [VERIFYING_KEY_SIGNATURE] = "key sig",
- [VERIFYING_KEY_SELF_SIGNATURE] = "key self sig",
- [VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig",
-};
-EXPORT_SYMBOL_GPL(key_being_used_for);
static LIST_HEAD(asymmetric_key_parsers);
static DECLARE_RWSEM(asymmetric_key_parsers_sem);
@@ -61,17 +51,18 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
- WARN_ON(!id_0 && !id_1 && !id_2);
-
if (id_0) {
lookup = id_0->data;
len = id_0->len;
} else if (id_1) {
lookup = id_1->data;
len = id_1->len;
- } else {
+ } else if (id_2) {
lookup = id_2->data;
len = id_2->len;
+ } else {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
}
/* Construct an identifier "id:<keyid>". */
@@ -151,12 +142,17 @@ struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_2)
{
struct asymmetric_key_id *kid;
-
- kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
- GFP_KERNEL);
+ size_t kid_sz;
+ size_t len;
+
+ if (check_add_overflow(len_1, len_2, &len))
+ return ERR_PTR(-EOVERFLOW);
+ if (check_add_overflow(sizeof(struct asymmetric_key_id), len, &kid_sz))
+ return ERR_PTR(-EOVERFLOW);
+ kid = kmalloc(kid_sz, GFP_KERNEL);
if (!kid)
return ERR_PTR(-ENOMEM);
- kid->len = len_1 + len_2;
+ kid->len = len;
memcpy(kid->data, val_1, len_1);
memcpy(kid->data + len_1, val_2, len_2);
return kid;
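
The check_add_overflow() conversion above replaces an unchecked
`len_1 + len_2` with arithmetic that reports wraparound instead of silently
sizing an allocation with a truncated value. A hedged sketch of the
helper's shape:

/* Hedged sketch: check_add_overflow() computes *sum = a + b and
 * returns true if the addition wrapped, so callers can bail out
 * before allocating with a truncated size.
 */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *alloc_concat(size_t len_1, size_t len_2)
{
	size_t len;

	if (check_add_overflow(len_1, len_2, &len))
		return NULL;	/* the sum would have wrapped */

	return kmalloc(len, GFP_KERNEL);
}
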
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 839591ad21ac..8aecbe4637f3 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -75,12 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
- case OID_md4:
- ctx->digest_algo = "md4";
- break;
- case OID_md5:
- ctx->digest_algo = "md5";
- break;
case OID_sha1:
ctx->digest_algo = "sha1";
break;
@@ -93,8 +87,14 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
case OID_sha512:
ctx->digest_algo = "sha512";
break;
- case OID_sha224:
- ctx->digest_algo = "sha224";
+ case OID_sha3_256:
+ ctx->digest_algo = "sha3-256";
+ break;
+ case OID_sha3_384:
+ ctx->digest_algo = "sha3-384";
+ break;
+ case OID_sha3_512:
+ ctx->digest_algo = "sha3-512";
break;
case OID__NR:
diff --git a/crypto/asymmetric_keys/pkcs7.asn1 b/crypto/asymmetric_keys/pkcs7.asn1
index 1eca740b816a..28e1f4a41c14 100644
--- a/crypto/asymmetric_keys/pkcs7.asn1
+++ b/crypto/asymmetric_keys/pkcs7.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2009 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc5652#section-3
+
PKCS7ContentInfo ::= SEQUENCE {
contentType ContentType ({ pkcs7_check_content_type }),
content [0] EXPLICIT SignedData OPTIONAL
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 277482bb1777..423d13c47545 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -227,12 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
- case OID_md4:
- ctx->sinfo->sig->hash_algo = "md4";
- break;
- case OID_md5:
- ctx->sinfo->sig->hash_algo = "md5";
- break;
case OID_sha1:
ctx->sinfo->sig->hash_algo = "sha1";
break;
@@ -257,6 +251,15 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
case OID_gost2012Digest512:
ctx->sinfo->sig->hash_algo = "streebog512";
break;
+ case OID_sha3_256:
+ ctx->sinfo->sig->hash_algo = "sha3-256";
+ break;
+ case OID_sha3_384:
+ ctx->sinfo->sig->hash_algo = "sha3-384";
+ break;
+ case OID_sha3_512:
+ ctx->sinfo->sig->hash_algo = "sha3-512";
+ break;
default:
printk("Unsupported digest algo: %u\n", ctx->last_oid);
return -ENOPKG;
@@ -283,13 +286,12 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
case OID_id_ecdsa_with_sha512:
+ case OID_id_ecdsa_with_sha3_256:
+ case OID_id_ecdsa_with_sha3_384:
+ case OID_id_ecdsa_with_sha3_512:
ctx->sinfo->sig->pkey_algo = "ecdsa";
ctx->sinfo->sig->encoding = "x962";
break;
- case OID_SM2_with_SM3:
- ctx->sinfo->sig->pkey_algo = "sm2";
- ctx->sinfo->sig->encoding = "raw";
- break;
case OID_gost2012PKey256:
case OID_gost2012PKey512:
ctx->sinfo->sig->pkey_algo = "ecrdsa";
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index f6321c785714..6d6475e3a9bf 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
}
if (sinfo->msgdigest_len != sig->digest_size) {
- pr_debug("Sig %u: Invalid digest size (%u)\n",
- sinfo->index, sinfo->msgdigest_len);
+ pr_warn("Sig %u: Invalid digest size (%u)\n",
+ sinfo->index, sinfo->msgdigest_len);
ret = -EBADMSG;
goto error;
}
if (memcmp(sig->digest, sinfo->msgdigest,
sinfo->msgdigest_len) != 0) {
- pr_debug("Sig %u: Message digest doesn't match\n",
- sinfo->index);
+ pr_warn("Sig %u: Message digest doesn't match\n",
+ sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
@@ -429,6 +429,7 @@ int pkcs7_verify(struct pkcs7_message *pkcs7,
/* Authattr presence checked in parser */
break;
case VERIFYING_UNSPECIFIED_SIGNATURE:
+ case VERIFYING_BPF_SIGNATURE:
if (pkcs7->data_type != OID_data) {
pr_warn("Invalid unspecified sig (not pkcs7-data)\n");
return -EKEYREJECTED;
@@ -478,10 +479,11 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
const void *data, size_t datalen)
{
if (pkcs7->data) {
- pr_debug("Data already supplied\n");
+ pr_warn("Data already supplied\n");
return -EINVAL;
}
pkcs7->data = data;
pkcs7->data_len = datalen;
return 0;
}
+EXPORT_SYMBOL_GPL(pkcs7_supply_detached_data);
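
Exporting pkcs7_supply_detached_data() lets modular users (such as the BPF
signature path added to pkcs7_verify() above) verify a detached signature:
parse the PKCS#7 blob, attach the external payload, then verify. A hedged
sketch under those assumptions:

/* Hedged sketch: verifying a detached PKCS#7 signature over 'data'. */
#include <crypto/pkcs7.h>
#include <linux/err.h>

static int verify_detached(const void *sig, size_t sig_len,
			   const void *data, size_t data_len)
{
	struct pkcs7_message *pkcs7;
	int ret;

	pkcs7 = pkcs7_parse_message(sig, sig_len);
	if (IS_ERR(pkcs7))
		return PTR_ERR(pkcs7);

	/* The signed content is not embedded; supply it separately. */
	ret = pkcs7_supply_detached_data(pkcs7, data, data_len);
	if (!ret)
		ret = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);

	pkcs7_free_message(pkcs7);
	return ret;
}
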
diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1
index 702c41a3c713..a2a8af2633d8 100644
--- a/crypto/asymmetric_keys/pkcs8.asn1
+++ b/crypto/asymmetric_keys/pkcs8.asn1
@@ -1,3 +1,9 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2010 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc5958#section-2
--
-- This is the unencrypted variant
--
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 2f8352e88860..e5b177c8e842 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -8,18 +8,17 @@
*/
#define pr_fmt(fmt) "PKEY: "fmt
-#include <linux/module.h>
-#include <linux/export.h>
+#include <crypto/akcipher.h>
+#include <crypto/public_key.h>
+#include <crypto/sig.h>
+#include <keys/asymmetric-subtype.h>
+#include <linux/asn1.h>
+#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/scatterlist.h>
-#include <linux/asn1.h>
-#include <keys/asymmetric-subtype.h>
-#include <crypto/public_key.h>
-#include <crypto/akcipher.h>
-#include <crypto/sm2.h>
-#include <crypto/sm3_base.h>
+#include <linux/slab.h>
+#include <linux/string.h>
MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
MODULE_AUTHOR("Red Hat, Inc.");
@@ -43,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key,
void public_key_free(struct public_key *key)
{
if (key) {
- kfree(key->key);
+ kfree_sensitive(key->key);
kfree(key->params);
kfree(key);
}
@@ -67,10 +66,13 @@ static void public_key_destroy(void *payload0, void *payload3)
static int
software_key_determine_akcipher(const struct public_key *pkey,
const char *encoding, const char *hash_algo,
- char alg_name[CRYPTO_MAX_ALG_NAME])
+ char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig,
+ enum kernel_pkey_operation op)
{
int n;
+ *sig = true;
+
if (!encoding)
return -EINVAL;
@@ -79,14 +81,23 @@ software_key_determine_akcipher(const struct public_key *pkey,
* RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
*/
if (strcmp(encoding, "pkcs1") == 0) {
- if (!hash_algo)
+ *sig = op == kernel_pkey_sign ||
+ op == kernel_pkey_verify;
+ if (!*sig) {
+ /*
+ * For encrypt/decrypt, hash_algo is not used
+ * but allowed to be set for historic reasons.
+ */
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
- else
+ } else {
+ if (!hash_algo)
+ hash_algo = "none";
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
- "pkcs1pad(%s,%s)",
+ "pkcs1(%s,%s)",
pkey->pkey_algo, hash_algo);
+ }
return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
}
if (strcmp(encoding, "raw") != 0)
@@ -97,8 +108,10 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (hash_algo)
return -EINVAL;
+ *sig = false;
} else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
- if (strcmp(encoding, "x962") != 0)
+ if (strcmp(encoding, "x962") != 0 &&
+ strcmp(encoding, "p1363") != 0)
return -EINVAL;
/*
* ECDSA signatures are taken over a raw hash, so they don't
@@ -113,15 +126,14 @@ software_key_determine_akcipher(const struct public_key *pkey,
strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
- strcmp(hash_algo, "sha512") != 0)
- return -EINVAL;
- } else if (strcmp(pkey->pkey_algo, "sm2") == 0) {
- if (strcmp(encoding, "raw") != 0)
- return -EINVAL;
- if (!hash_algo)
- return -EINVAL;
- if (strcmp(hash_algo, "sm3") != 0)
+ strcmp(hash_algo, "sha512") != 0 &&
+ strcmp(hash_algo, "sha3-256") != 0 &&
+ strcmp(hash_algo, "sha3-384") != 0 &&
+ strcmp(hash_algo, "sha3-512") != 0)
return -EINVAL;
+ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
+ encoding, pkey->pkey_algo);
+ return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
} else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) {
if (strcmp(encoding, "raw") != 0)
return -EINVAL;
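
With this change, signature operations are routed to the sig API under names
like "pkcs1(rsa,sha256)", while encrypt/decrypt keep the "pkcs1pad(rsa)"
akcipher template, and ECDSA verification accepts either "x962(...)" or
"p1363(...)" encodings. A hedged sketch of the name composition for the RSA
signature case (rsa_sig_name() is an illustrative helper, not part of this
patch):

/* Hedged sketch: composing the sig-API template name used above. */
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int rsa_sig_name(char buf[CRYPTO_MAX_ALG_NAME], const char *hash_algo)
{
	/* "none" is used when no hash algorithm was specified, as above. */
	int n = snprintf(buf, CRYPTO_MAX_ALG_NAME, "pkcs1(rsa,%s)",
			 hash_algo ?: "none");

	return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
}
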
@@ -151,56 +163,100 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val)
static int software_key_query(const struct kernel_pkey_params *params,
struct kernel_pkey_query *info)
{
- struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
u8 *key, *ptr;
int ret, len;
+ bool issig;
ret = software_key_determine_akcipher(pkey, params->encoding,
- params->hash_algo, alg_name);
+ params->hash_algo, alg_name,
+ &issig, kernel_pkey_sign);
if (ret < 0)
return ret;
- tfm = crypto_alloc_akcipher(alg_name, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = -ENOMEM;
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
- goto error_free_tfm;
+ return -ENOMEM;
+
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
- if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
- else
- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
- if (ret < 0)
- goto error_free_key;
+ memset(info, 0, sizeof(*info));
- len = crypto_akcipher_maxsize(tfm);
- info->key_size = len * 8;
- info->max_data_size = len;
- info->max_sig_size = len;
- info->max_enc_size = len;
- info->max_dec_size = len;
- info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT |
- KEYCTL_SUPPORTS_VERIFY);
- if (pkey->key_is_private)
- info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT |
- KEYCTL_SUPPORTS_SIGN);
- ret = 0;
+ if (issig) {
+ struct crypto_sig *sig;
+
+ sig = crypto_alloc_sig(alg_name, 0, 0);
+ if (IS_ERR(sig)) {
+ ret = PTR_ERR(sig);
+ goto error_free_key;
+ }
+
+ if (pkey->key_is_private)
+ ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
+ else
+ ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
+ if (ret < 0)
+ goto error_free_sig;
+
+ len = crypto_sig_keysize(sig);
+ info->key_size = len;
+ info->max_sig_size = crypto_sig_maxsize(sig);
+ info->max_data_size = crypto_sig_digestsize(sig);
+
+ info->supported_ops = KEYCTL_SUPPORTS_VERIFY;
+ if (pkey->key_is_private)
+ info->supported_ops |= KEYCTL_SUPPORTS_SIGN;
+
+ if (strcmp(params->encoding, "pkcs1") == 0) {
+ info->max_enc_size = len / BITS_PER_BYTE;
+ info->max_dec_size = len / BITS_PER_BYTE;
+
+ info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT;
+ if (pkey->key_is_private)
+ info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
+ }
+
+error_free_sig:
+ crypto_free_sig(sig);
+ } else {
+ struct crypto_akcipher *tfm;
+
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto error_free_key;
+ }
+
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+ if (ret < 0)
+ goto error_free_akcipher;
+
+ len = crypto_akcipher_maxsize(tfm);
+ info->key_size = len * BITS_PER_BYTE;
+ info->max_sig_size = len;
+ info->max_data_size = len;
+ info->max_enc_size = len;
+ info->max_dec_size = len;
+
+ info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
+ if (pkey->key_is_private)
+ info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
+
+error_free_akcipher:
+ crypto_free_akcipher(tfm);
+ }
error_free_key:
- kfree(key);
-error_free_tfm:
- crypto_free_akcipher(tfm);
+ kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@@ -212,34 +268,25 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
const void *in, void *out)
{
const struct public_key *pkey = params->key->payload.data[asym_crypto];
- struct akcipher_request *req;
- struct crypto_akcipher *tfm;
- struct crypto_wait cwait;
- struct scatterlist in_sg, out_sg;
char alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_akcipher *tfm;
+ struct crypto_sig *sig;
char *key, *ptr;
+ bool issig;
int ret;
pr_devel("==>%s()\n", __func__);
ret = software_key_determine_akcipher(pkey, params->encoding,
- params->hash_algo, alg_name);
+ params->hash_algo, alg_name,
+ &issig, params->op);
if (ret < 0)
return ret;
- tfm = crypto_alloc_akcipher(alg_name, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = -ENOMEM;
- req = akcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req)
- goto error_free_tfm;
-
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
- goto error_free_req;
+ return -ENOMEM;
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
@@ -247,122 +294,84 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
- if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
- else
- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
- if (ret)
- goto error_free_key;
+ if (issig) {
+ sig = crypto_alloc_sig(alg_name, 0, 0);
+ if (IS_ERR(sig)) {
+ ret = PTR_ERR(sig);
+ goto error_free_key;
+ }
- sg_init_one(&in_sg, in, params->in_len);
- sg_init_one(&out_sg, out, params->out_len);
- akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
- params->out_len);
- crypto_init_wait(&cwait);
- akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &cwait);
+ if (pkey->key_is_private)
+ ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
+ else
+ ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
+ if (ret)
+ goto error_free_tfm;
+ } else {
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto error_free_key;
+ }
+
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+ if (ret)
+ goto error_free_tfm;
+ }
+
+ ret = -EINVAL;
/* Perform the encryption calculation. */
switch (params->op) {
case kernel_pkey_encrypt:
- ret = crypto_akcipher_encrypt(req);
+ if (issig)
+ break;
+ ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len,
+ out, params->out_len);
break;
case kernel_pkey_decrypt:
- ret = crypto_akcipher_decrypt(req);
+ if (issig)
+ break;
+ ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len,
+ out, params->out_len);
break;
case kernel_pkey_sign:
- ret = crypto_akcipher_sign(req);
+ if (!issig)
+ break;
+ ret = crypto_sig_sign(sig, in, params->in_len,
+ out, params->out_len);
break;
default:
BUG();
}
- ret = crypto_wait_req(ret, &cwait);
- if (ret == 0)
- ret = req->dst_len;
+ if (!issig && ret == 0)
+ ret = crypto_akcipher_maxsize(tfm);
-error_free_key:
- kfree(key);
-error_free_req:
- akcipher_request_free(req);
error_free_tfm:
- crypto_free_akcipher(tfm);
+ if (issig)
+ crypto_free_sig(sig);
+ else
+ crypto_free_akcipher(tfm);
+error_free_key:
+ kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
-#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
-static int cert_sig_digest_update(const struct public_key_signature *sig,
- struct crypto_akcipher *tfm_pkey)
-{
- struct crypto_shash *tfm;
- struct shash_desc *desc;
- size_t desc_size;
- unsigned char dgst[SM3_DIGEST_SIZE];
- int ret;
-
- BUG_ON(!sig->data);
-
- /* SM2 signatures always use the SM3 hash algorithm */
- if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0)
- return -EINVAL;
-
- ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID,
- SM2_DEFAULT_USERID_LEN, dgst);
- if (ret)
- return ret;
-
- tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
- desc = kzalloc(desc_size, GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto error_free_tfm;
- }
-
- desc->tfm = tfm;
-
- ret = crypto_shash_init(desc);
- if (ret < 0)
- goto error_free_desc;
-
- ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE);
- if (ret < 0)
- goto error_free_desc;
-
- ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest);
-
-error_free_desc:
- kfree(desc);
-error_free_tfm:
- crypto_free_shash(tfm);
- return ret;
-}
-#else
-static inline int cert_sig_digest_update(
- const struct public_key_signature *sig,
- struct crypto_akcipher *tfm_pkey)
-{
- return -ENOTSUPP;
-}
-#endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */
-
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- struct crypto_wait cwait;
- struct crypto_akcipher *tfm;
- struct akcipher_request *req;
- struct scatterlist src_sg[2];
char alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_sig *tfm;
char *key, *ptr;
+ bool issig;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -387,23 +396,21 @@ int public_key_verify_signature(const struct public_key *pkey,
}
ret = software_key_determine_akcipher(pkey, sig->encoding,
- sig->hash_algo, alg_name);
+ sig->hash_algo, alg_name,
+ &issig, kernel_pkey_verify);
if (ret < 0)
return ret;
- tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ tfm = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- ret = -ENOMEM;
- req = akcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req)
- goto error_free_tfm;
-
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
- if (!key)
- goto error_free_req;
+ if (!key) {
+ ret = -ENOMEM;
+ goto error_free_tfm;
+ }
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
@@ -412,35 +419,19 @@ int public_key_verify_signature(const struct public_key *pkey,
memcpy(ptr, pkey->params, pkey->paramlen);
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+ ret = crypto_sig_set_privkey(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+ ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen);
if (ret)
goto error_free_key;
- if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) {
- ret = cert_sig_digest_update(sig, tfm);
- if (ret)
- goto error_free_key;
- }
-
- sg_init_table(src_sg, 2);
- sg_set_buf(&src_sg[0], sig->s, sig->s_size);
- sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
- akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
- sig->digest_size);
- crypto_init_wait(&cwait);
- akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &cwait);
- ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
+ ret = crypto_sig_verify(tfm, sig->s, sig->s_size,
+ sig->digest, sig->digest_size);
error_free_key:
- kfree(key);
-error_free_req:
- akcipher_request_free(req);
+ kfree_sensitive(key);
error_free_tfm:
- crypto_free_akcipher(tfm);
+ crypto_free_sig(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
if (WARN_ON_ONCE(ret > 0))
ret = -EINVAL;
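
The hunk above swaps the request-based akcipher machinery for the synchronous crypto_sig API. A minimal hedged sketch of the new call sequence, with an illustrative algorithm name (software_key_determine_akcipher() derives the real one) and the error unwinding trimmed:

    /* Hedged sketch: verify a digest against a signature via crypto_sig. */
    struct crypto_sig *tfm;
    int ret;

    tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0); /* name illustrative */
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    ret = crypto_sig_set_pubkey(tfm, key, keylen);
    if (!ret)
            ret = crypto_sig_verify(tfm, sig->s, sig->s_size,
                                    sig->digest, sig->digest_size);

    crypto_free_sig(tfm);
    return ret;

Note there is no akcipher_request_alloc()/crypto_wait_req() pairing any more: crypto_sig_verify() is synchronous, which is what lets the function drop the crypto_wait, callback, and scatterlist setup.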
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 6b1ac5f5896a..86292965f493 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -17,9 +17,12 @@ static struct asymmetric_key_id *ca_keyid;
#ifndef MODULE
static struct {
- struct asymmetric_key_id id;
- unsigned char data[10];
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct asymmetric_key_id, id, data,
+ unsigned char data[10];
+ );
} cakey;
+static_assert(offsetof(typeof(cakey), id.data) == offsetof(typeof(cakey), data));
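
TRAILING_OVERLAP() is needed because struct asymmetric_key_id ends in a flexible-array member and so cannot simply be followed by a fixed buffer in another struct. A hedged sketch of the layout the macro achieves (conceptual only, not its actual expansion); the static_assert above then pins down that the overlay really lines up:

    /* Conceptual layout: give id's trailing data[] ten bytes of backing
     * storage at the same offset, within a single object. */
    union {
            struct asymmetric_key_id id;  /* ends in: unsigned char data[]; */
            struct {
                    unsigned char __pad[offsetof(struct asymmetric_key_id, data)];
                    unsigned char data[10];
            };
    } cakey;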
static int __init ca_keys_setup(char *str)
{
@@ -102,12 +105,100 @@ int restrict_link_by_signature(struct key *dest_keyring,
if (use_builtin_keys && !test_bit(KEY_FLAG_BUILTIN, &key->flags))
ret = -ENOKEY;
+ else if (IS_BUILTIN(CONFIG_SECONDARY_TRUSTED_KEYRING_SIGNED_BY_BUILTIN) &&
+ !strcmp(dest_keyring->description, ".secondary_trusted_keys") &&
+ !test_bit(KEY_FLAG_BUILTIN, &key->flags))
+ ret = -ENOKEY;
else
ret = verify_signature(key, sig);
key_put(key);
return ret;
}
+/**
+ * restrict_link_by_ca - Restrict additions to a ring of CA keys
+ * @dest_keyring: Keyring being linked to.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ * @trust_keyring: Unused.
+ *
+ * Check if the new certificate is a CA. If it is, then mark the new
+ * certificate as being ok to link.
+ *
+ * Returns 0 if the new certificate was accepted, -ENOKEY if the
+ * certificate is not a CA, -ENOPKG if the signature uses unsupported
+ * crypto, or some other error if there is a matching certificate but
+ * the signature check cannot be performed.
+ */
+int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ const struct public_key *pkey;
+
+ if (type != &key_type_asymmetric)
+ return -EOPNOTSUPP;
+
+ pkey = payload->data[asym_crypto];
+ if (!pkey)
+ return -ENOPKG;
+ if (!test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
+ return -ENOKEY;
+ if (!test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
+ return -ENOKEY;
+ if (!IS_ENABLED(CONFIG_INTEGRITY_CA_MACHINE_KEYRING_MAX))
+ return 0;
+ if (test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
+ return -ENOKEY;
+
+ return 0;
+}
+
+/**
+ * restrict_link_by_digsig - Restrict additions to a ring of digsig keys
+ * @dest_keyring: Keyring being linked to.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ * @trust_keyring: A ring of keys that can be used to vouch for the new cert.
+ *
+ * Check if the new certificate has digitalSignature usage set. If it
+ * does, then mark the new certificate as being ok to link. Afterwards,
+ * verify the new certificate against the ones in the trust_keyring.
+ *
+ * Returns 0 if the new certificate was accepted, -ENOKEY if the
+ * certificate is not a digsig, -ENOPKG if the signature uses unsupported
+ * crypto, or some other error if there is a matching certificate but
+ * the signature check cannot be performed.
+ */
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ const struct public_key *pkey;
+
+ if (type != &key_type_asymmetric)
+ return -EOPNOTSUPP;
+
+ pkey = payload->data[asym_crypto];
+
+ if (!pkey)
+ return -ENOPKG;
+
+ if (!test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
+ return -ENOKEY;
+
+ if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
+ return -ENOKEY;
+
+ if (test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
+ return -ENOKEY;
+
+ return restrict_link_by_signature(dest_keyring, type, payload,
+ trust_keyring);
+}
+
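
These restriction hooks are consumed through struct key_restriction. A hedged sketch of wiring restrict_link_by_ca up at keyring creation time (the keyring name and permission mask are illustrative):

    static struct key_restriction ca_restriction = {
            .check = restrict_link_by_ca,
    };

    keyring = keyring_alloc(".machine",              /* illustrative name */
                            GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
                            current_cred(),
                            (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
                            KEY_ALLOC_NOT_IN_QUOTA,
                            &ca_restriction, NULL);

Every subsequent attempt to link a key into that keyring then has to pass the check function before the link is permitted.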
static bool match_either_id(const struct asymmetric_key_id **pair,
const struct asymmetric_key_id *single)
{
diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
index fa0bf7f24284..98dc5cdfdebe 100644
--- a/crypto/asymmetric_keys/selftest.c
+++ b/crypto/asymmetric_keys/selftest.c
@@ -1,186 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* Self-testing for signature checking.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-#include <linux/kernel.h>
+#include <crypto/pkcs7.h>
#include <linux/cred.h>
+#include <linux/kernel.h>
#include <linux/key.h>
-#include <crypto/pkcs7.h>
+#include <linux/module.h>
+#include "selftest.h"
#include "x509_parser.h"
-struct certs_test {
- const u8 *data;
- size_t data_len;
- const u8 *pkcs7;
- size_t pkcs7_len;
-};
-
-/*
- * Set of X.509 certificates to provide public keys for the tests. These will
- * be loaded into a temporary keyring for the duration of the testing.
- */
-static const __initconst u8 certs_selftest_keys[] = {
- "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73"
- "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a"
- "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b"
- "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43"
- "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66"
- "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73"
- "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35"
- "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30"
- "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30"
- "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61"
- "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20"
- "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79"
- "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01"
- "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01"
- "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f"
- "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99"
- "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92"
- "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77"
- "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55"
- "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d"
- "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44"
- "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01"
- "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7"
- "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68"
- "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6"
- "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81"
- "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78"
- "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6"
- "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f"
- "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62"
- "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94"
- "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48"
- "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c"
- "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45"
- "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a"
- "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68"
- "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09"
- "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95"
- "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec"
- "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe"
- "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42"
- "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35"
- "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4"
- "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f"
- "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a"
- "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc"
- "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d"
- "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04"
- "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14"
- "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17"
- "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80"
- "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88"
- "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01"
- "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85"
- "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47"
- "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88"
- "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12"
- "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f"
- "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71"
- "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b"
- "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56"
- "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f"
- "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02"
- "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52"
- "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a"
- "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99"
- "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa"
- "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87"
- "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13"
- "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f"
- "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66"
- "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07"
- "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05"
- "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04"
- "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c"
- "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb"
- "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48"
- "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d"
- "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4"
- "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd"
- "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71"
- "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70"
- "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46"
- "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0"
- "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62"
- "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3"
-};
-
-/*
- * Signed data and detached signature blobs that form the verification tests.
- */
-static const __initconst u8 certs_selftest_1_data[] = {
- "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
- "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
- "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
- "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
- "\x61\x74\x69\x6f\x6e\x2e\x0a"
-};
-
-static const __initconst u8 certs_selftest_1_pkcs7[] = {
- "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0"
- "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09"
- "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48"
- "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01"
- "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29"
- "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69"
- "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65"
- "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d"
- "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30"
- "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09"
- "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac"
- "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3"
- "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c"
- "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00"
- "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb"
- "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11"
- "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33"
- "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23"
- "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37"
- "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab"
- "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c"
- "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41"
- "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6"
- "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72"
- "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87"
- "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4"
- "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92"
- "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79"
- "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b"
- "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0"
- "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f"
- "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e"
- "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31"
- "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24"
- "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14"
- "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe"
- "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8"
- "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e"
- "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36"
- "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb"
- "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64"
- "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51"
- "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2"
-};
-
-/*
- * List of tests to be run.
- */
-#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 }
-static const struct certs_test certs_tests[] __initconst = {
- TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
-};
-
-int __init fips_signature_selftest(void)
+void fips_signature_selftest(const char *name,
+ const u8 *keys, size_t keys_len,
+ const u8 *data, size_t data_len,
+ const u8 *sig, size_t sig_len)
{
struct key *keyring;
- int ret, i;
+ int ret;
- pr_notice("Running certificate verification selftests\n");
+ pr_notice("Running certificate verification %s selftest\n", name);
keyring = keyring_alloc(".certs_selftest",
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
@@ -190,35 +31,42 @@ int __init fips_signature_selftest(void)
KEY_ALLOC_NOT_IN_QUOTA,
NULL, NULL);
if (IS_ERR(keyring))
- panic("Can't allocate certs selftest keyring: %ld\n",
- PTR_ERR(keyring));
+ panic("Can't allocate certs %s selftest keyring: %ld\n", name, PTR_ERR(keyring));
- ret = x509_load_certificate_list(certs_selftest_keys,
- sizeof(certs_selftest_keys) - 1, keyring);
+ ret = x509_load_certificate_list(keys, keys_len, keyring);
if (ret < 0)
- panic("Can't allocate certs selftest keyring: %d\n", ret);
+ panic("Can't allocate certs %s selftest keyring: %d\n", name, ret);
- for (i = 0; i < ARRAY_SIZE(certs_tests); i++) {
- const struct certs_test *test = &certs_tests[i];
- struct pkcs7_message *pkcs7;
+ struct pkcs7_message *pkcs7;
- pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len);
- if (IS_ERR(pkcs7))
- panic("Certs selftest %d: pkcs7_parse_message() = %d\n", i, ret);
+ pkcs7 = pkcs7_parse_message(sig, sig_len);
+ if (IS_ERR(pkcs7))
+ panic("Certs %s selftest: pkcs7_parse_message() = %d\n", name, ret);
- pkcs7_supply_detached_data(pkcs7, test->data, test->data_len);
+ pkcs7_supply_detached_data(pkcs7, data, data_len);
- ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
- if (ret < 0)
- panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret);
+ ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
+ if (ret < 0)
+ panic("Certs %s selftest: pkcs7_verify() = %d\n", name, ret);
- ret = pkcs7_validate_trust(pkcs7, keyring);
- if (ret < 0)
- panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret);
+ ret = pkcs7_validate_trust(pkcs7, keyring);
+ if (ret < 0)
+ panic("Certs %s selftest: pkcs7_validate_trust() = %d\n", name, ret);
- pkcs7_free_message(pkcs7);
- }
+ pkcs7_free_message(pkcs7);
key_put(keyring);
+}
+
+static int __init fips_signature_selftest_init(void)
+{
+ fips_signature_selftest_rsa();
+ fips_signature_selftest_ecdsa();
return 0;
}
+
+late_initcall(fips_signature_selftest_init);
+
+MODULE_DESCRIPTION("X.509 self tests");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/selftest.h b/crypto/asymmetric_keys/selftest.h
new file mode 100644
index 000000000000..4139f05906cb
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Helper function for self-testing PKCS#7 signature verification.
+ *
+ * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com>
+ */
+
+void fips_signature_selftest(const char *name,
+ const u8 *keys, size_t keys_len,
+ const u8 *data, size_t data_len,
+ const u8 *sig, size_t sig_len);
+
+#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_RSA
+void __init fips_signature_selftest_rsa(void);
+#else
+static inline void __init fips_signature_selftest_rsa(void) { }
+#endif
+
+#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA
+void __init fips_signature_selftest_ecdsa(void);
+#else
+static inline void __init fips_signature_selftest_ecdsa(void) { }
+#endif
diff --git a/crypto/asymmetric_keys/selftest_ecdsa.c b/crypto/asymmetric_keys/selftest_ecdsa.c
new file mode 100644
index 000000000000..20a0868b30de
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest_ecdsa.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Self-tests for PKCS#7 ECDSA signature verification.
+ *
+ * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com>
+ */
+
+#include <linux/module.h>
+#include "selftest.h"
+
+/*
+ * Set of X.509 certificates to provide public keys for the tests. These will
+ * be loaded into a temporary keyring for the duration of the testing.
+ */
+static const u8 certs_selftest_ecdsa_keys[] __initconst = {
+ /* P-256 ECDSA certificate */
+ "\x30\x82\x01\xd4\x30\x82\x01\x7b\xa0\x03\x02\x01\x02\x02\x14\x2e"
+ "\xea\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0"
+ "\xf8\x6f\xde\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x30"
+ "\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74"
+ "\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61"
+ "\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d"
+ "\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32"
+ "\x34\x30\x34\x31\x33\x32\x32\x31\x36\x32\x36\x5a\x18\x0f\x32\x31"
+ "\x32\x34\x30\x33\x32\x30\x32\x32\x31\x36\x32\x36\x5a\x30\x3a\x31"
+ "\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74\x69\x66"
+ "\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69"
+ "\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d\x74\x65"
+ "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x59\x30\x13\x06\x07\x2a"
+ "\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07"
+ "\x03\x42\x00\x04\x07\xe5\x6b\x51\xaf\xfc\x19\x41\x2c\x88\x92\x6b"
+ "\x77\x57\x71\x03\x9e\xe2\xfe\x6e\x6a\x71\x4e\xc7\x29\x9f\x90\xe1"
+ "\x77\x18\x9f\xc2\xe7\x0a\x82\xd0\x8a\xe1\x81\xa9\x71\x7c\x5a\x73"
+ "\xfb\x25\xb9\x5b\x1e\x24\x8c\x73\x9f\xf8\x38\xf8\x48\xb4\xad\x16"
+ "\x19\xc0\x22\xc6\xa3\x5d\x30\x5b\x30\x1d\x06\x03\x55\x1d\x0e\x04"
+ "\x16\x04\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2\x3d"
+ "\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x1f\x06\x03\x55\x1d\x23\x04\x18"
+ "\x30\x16\x80\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2"
+ "\x3d\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x0c\x06\x03\x55\x1d\x13\x01"
+ "\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04\x04\x03"
+ "\x02\x07\x80\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x03"
+ "\x47\x00\x30\x44\x02\x20\x1a\xd7\xac\x07\xc8\x97\x38\xf4\x89\x43"
+ "\x7e\xc7\x66\x6e\xa5\x00\x7c\x12\x1d\xb4\x09\x76\x0c\x99\x6b\x8c"
+ "\x26\x5d\xe9\x70\x5c\xb4\x02\x20\x73\xb7\xc7\x7a\x5a\xdb\x67\x0a"
+ "\x96\x42\x19\xcf\x4f\x67\x4f\x35\x6a\xee\x29\x25\xf2\x4f\xc8\x10"
+ "\x14\x9d\x79\x69\x1c\x7a\xd7\x5d"
+};
+
+/*
+ * Signed data and detached signature blobs that form the verification tests.
+ */
+static const u8 certs_selftest_ecdsa_data[] __initconst = {
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
+ "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
+ "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
+ "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+ "\x61\x74\x69\x6f\x6e\x2e\x0a"
+};
+
+static const u8 certs_selftest_ecdsa_sig[] __initconst = {
+ /* ECDSA signature using SHA-256 */
+ "\x30\x81\xf4\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0\x81"
+ "\xe6\x30\x81\xe3\x02\x01\x01\x31\x0f\x30\x0d\x06\x09\x60\x86\x48"
+ "\x01\x65\x03\x04\x02\x01\x05\x00\x30\x0b\x06\x09\x2a\x86\x48\x86"
+ "\xf7\x0d\x01\x07\x01\x31\x81\xbf\x30\x81\xbc\x02\x01\x01\x30\x52"
+ "\x30\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72"
+ "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+ "\x61\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66"
+ "\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x2e\xea"
+ "\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0\xf8"
+ "\x6f\xde\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05"
+ "\x00\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x04\x48\x30"
+ "\x46\x02\x21\x00\x86\xd1\xf4\x06\xb6\x49\x79\xf9\x09\x5f\x35\x1a"
+ "\x94\x7e\x0e\x1a\x12\x4d\xd9\xe6\x2a\x2d\xcf\x2d\x0a\xee\x88\x76"
+ "\xe0\x35\xf3\xeb\x02\x21\x00\xdf\x11\x8a\xab\x31\xf6\x3c\x1f\x32"
+ "\x43\x94\xe2\xb8\x35\xc9\xf3\x12\x4e\x9b\x31\x08\x10\x5d\x8d\xe2"
+ "\x43\x0a\x5f\xf5\xfd\xa2\xf1"
+};
+
+void __init fips_signature_selftest_ecdsa(void)
+{
+ fips_signature_selftest("ECDSA",
+ certs_selftest_ecdsa_keys,
+ sizeof(certs_selftest_ecdsa_keys) - 1,
+ certs_selftest_ecdsa_data,
+ sizeof(certs_selftest_ecdsa_data) - 1,
+ certs_selftest_ecdsa_sig,
+ sizeof(certs_selftest_ecdsa_sig) - 1);
+}
diff --git a/crypto/asymmetric_keys/selftest_rsa.c b/crypto/asymmetric_keys/selftest_rsa.c
new file mode 100644
index 000000000000..09c9815e456a
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest_rsa.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Self-tests for PKCS#7 RSA signature verification.
+ *
+ * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com>
+ */
+
+#include <linux/module.h>
+#include "selftest.h"
+
+/*
+ * Set of X.509 certificates to provide public keys for the tests. These will
+ * be loaded into a temporary keyring for the duration of the testing.
+ */
+static const u8 certs_selftest_rsa_keys[] __initconst = {
+ /* 4096-bit RSA certificate */
+ "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73"
+ "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a"
+ "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b"
+ "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43"
+ "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66"
+ "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73"
+ "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35"
+ "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30"
+ "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30"
+ "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61"
+ "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20"
+ "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79"
+ "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01"
+ "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01"
+ "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f"
+ "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99"
+ "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92"
+ "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77"
+ "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55"
+ "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d"
+ "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44"
+ "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01"
+ "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7"
+ "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68"
+ "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6"
+ "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81"
+ "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78"
+ "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6"
+ "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f"
+ "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62"
+ "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94"
+ "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48"
+ "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c"
+ "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45"
+ "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a"
+ "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68"
+ "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09"
+ "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95"
+ "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec"
+ "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe"
+ "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42"
+ "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35"
+ "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4"
+ "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f"
+ "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a"
+ "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc"
+ "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d"
+ "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04"
+ "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14"
+ "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17"
+ "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80"
+ "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88"
+ "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01"
+ "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85"
+ "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47"
+ "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88"
+ "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12"
+ "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f"
+ "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71"
+ "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b"
+ "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56"
+ "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f"
+ "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02"
+ "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52"
+ "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a"
+ "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99"
+ "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa"
+ "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87"
+ "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13"
+ "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f"
+ "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66"
+ "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07"
+ "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05"
+ "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04"
+ "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c"
+ "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb"
+ "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48"
+ "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d"
+ "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4"
+ "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd"
+ "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71"
+ "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70"
+ "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46"
+ "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0"
+ "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62"
+ "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3"
+};
+
+/*
+ * Signed data and detached signature blobs that form the verification tests.
+ */
+static const u8 certs_selftest_rsa_data[] __initconst = {
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
+ "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
+ "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
+ "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+ "\x61\x74\x69\x6f\x6e\x2e\x0a"
+};
+
+static const u8 certs_selftest_rsa_sig[] __initconst = {
+ /* RSA signature using PKCS#1 v1.5 padding with SHA-256 */
+ "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0"
+ "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09"
+ "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48"
+ "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01"
+ "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29"
+ "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69"
+ "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65"
+ "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d"
+ "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30"
+ "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09"
+ "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac"
+ "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3"
+ "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c"
+ "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00"
+ "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb"
+ "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11"
+ "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33"
+ "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23"
+ "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37"
+ "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab"
+ "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c"
+ "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41"
+ "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6"
+ "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72"
+ "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87"
+ "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4"
+ "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92"
+ "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79"
+ "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b"
+ "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0"
+ "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f"
+ "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e"
+ "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31"
+ "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24"
+ "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14"
+ "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe"
+ "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8"
+ "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e"
+ "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36"
+ "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb"
+ "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64"
+ "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51"
+ "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2"
+};
+
+void __init fips_signature_selftest_rsa(void)
+{
+ fips_signature_selftest("RSA",
+ certs_selftest_rsa_keys,
+ sizeof(certs_selftest_rsa_keys) - 1,
+ certs_selftest_rsa_data,
+ sizeof(certs_selftest_rsa_data) - 1,
+ certs_selftest_rsa_sig,
+ sizeof(certs_selftest_rsa_sig) - 1);
+}
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 2deff81f8af5..041d04b5c953 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -65,69 +65,6 @@ int query_asymmetric_key(const struct kernel_pkey_params *params,
EXPORT_SYMBOL_GPL(query_asymmetric_key);
/**
- * encrypt_blob - Encrypt data using an asymmetric key
- * @params: Various parameters
- * @data: Data blob to be encrypted, length params->data_len
- * @enc: Encrypted data buffer, length params->enc_len
- *
- * Encrypt the specified data blob using the private key specified by
- * params->key. The encrypted data is wrapped in an encoding if
- * params->encoding is specified (eg. "pkcs1").
- *
- * Returns the length of the data placed in the encrypted data buffer or an
- * error.
- */
-int encrypt_blob(struct kernel_pkey_params *params,
- const void *data, void *enc)
-{
- params->op = kernel_pkey_encrypt;
- return asymmetric_key_eds_op(params, data, enc);
-}
-EXPORT_SYMBOL_GPL(encrypt_blob);
-
-/**
- * decrypt_blob - Decrypt data using an asymmetric key
- * @params: Various parameters
- * @enc: Encrypted data to be decrypted, length params->enc_len
- * @data: Decrypted data buffer, length params->data_len
- *
- * Decrypt the specified data blob using the private key specified by
- * params->key. The decrypted data is wrapped in an encoding if
- * params->encoding is specified (eg. "pkcs1").
- *
- * Returns the length of the data placed in the decrypted data buffer or an
- * error.
- */
-int decrypt_blob(struct kernel_pkey_params *params,
- const void *enc, void *data)
-{
- params->op = kernel_pkey_decrypt;
- return asymmetric_key_eds_op(params, enc, data);
-}
-EXPORT_SYMBOL_GPL(decrypt_blob);
-
-/**
- * create_signature - Sign some data using an asymmetric key
- * @params: Various parameters
- * @data: Data blob to be signed, length params->data_len
- * @enc: Signature buffer, length params->enc_len
- *
- * Sign the specified data blob using the private key specified by params->key.
- * The signature is wrapped in an encoding if params->encoding is specified
- * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
- * passed through params->hash_algo (eg. "sha1").
- *
- * Returns the length of the data placed in the signature buffer or an error.
- */
-int create_signature(struct kernel_pkey_params *params,
- const void *data, void *enc)
-{
- params->op = kernel_pkey_sign;
- return asymmetric_key_eds_op(params, data, enc);
-}
-EXPORT_SYMBOL_GPL(create_signature);
-
-/**
* verify_signature - Initiate the use of an asymmetric key to verify a signature
* @key: The asymmetric key to verify against
* @sig: The signature to check
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 7553ab18db89..1f3b227ba7f2 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -28,7 +28,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
const struct pe32plus_opt_hdr *pe64;
const struct data_directory *ddir;
const struct data_dirent *dde;
- const struct section_header *secs, *sec;
+ const struct section_header *sec;
size_t cursor, datalen = pelen;
kenter("");
@@ -40,13 +40,13 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
} while (0)
chkaddr(0, 0, sizeof(*mz));
- if (mz->magic != MZ_MAGIC)
+ if (mz->magic != IMAGE_DOS_SIGNATURE)
return -ELIBBAD;
cursor = sizeof(*mz);
chkaddr(cursor, mz->peaddr, sizeof(*pe));
pe = pebuf + mz->peaddr;
- if (pe->magic != PE_MAGIC)
+ if (pe->magic != IMAGE_NT_SIGNATURE)
return -ELIBBAD;
cursor = mz->peaddr + sizeof(*pe);
@@ -55,7 +55,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
pe64 = pebuf + cursor;
switch (pe32->magic) {
- case PE_OPT_MAGIC_PE32:
+ case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
chkaddr(0, cursor, sizeof(*pe32));
ctx->image_checksum_offset =
(unsigned long)&pe32->csum - (unsigned long)pebuf;
@@ -64,7 +64,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->n_data_dirents = pe32->data_dirs;
break;
- case PE_OPT_MAGIC_PE32PLUS:
+ case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
chkaddr(0, cursor, sizeof(*pe64));
ctx->image_checksum_offset =
(unsigned long)&pe64->csum - (unsigned long)pebuf;
@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
break;
default:
- pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
+ pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
return -ELIBBAD;
}
@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->certs_size = ddir->certs.size;
if (!ddir->certs.virtual_address || !ddir->certs.size) {
- pr_debug("Unsigned PE binary\n");
+ pr_warn("Unsigned PE binary\n");
return -ENODATA;
}
@@ -110,7 +110,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->n_sections = pe->sections;
if (ctx->n_sections > (ctx->header_size - cursor) / sizeof(*sec))
return -ELIBBAD;
- ctx->secs = secs = pebuf + cursor;
+ ctx->secs = pebuf + cursor;
return 0;
}
@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
- pr_debug("Signature wrapper too short\n");
+ pr_warn("Signature wrapper too short\n");
return -ELIBBAD;
}
@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
- /* Both pesign and sbsign round up the length of certificate table
- * (in optional header data directories) to 8 byte alignment.
+ /* sbsign rounds up the length of the certificate table (in the
+ * optional header data directories) to 8-byte alignment. However,
+ * the PE specification states that while entries are 8-byte aligned,
+ * this is not included in their length, and as a result, pesign has
+ * not rounded up since 0.110.
*/
- if (round_up(wrapper.length, 8) != ctx->sig_len) {
- pr_debug("Signature wrapper len wrong\n");
+ if (wrapper.length > ctx->sig_len) {
+ pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
+ wrapper.length, ctx->sig_len);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {
- pr_debug("Signature is not revision 2.0\n");
+ pr_warn("Signature is not revision 2.0\n");
return -ENOTSUPP;
}
if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
- pr_debug("Signature certificate type is not PKCS\n");
+ pr_warn("Signature certificate type is not PKCS\n");
return -ENOTSUPP;
}
@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
if (ctx->sig_len < 4) {
- pr_debug("Signature data missing\n");
+ pr_warn("Signature data missing\n");
return -EKEYREJECTED;
}
@@ -194,7 +198,7 @@ check_len:
return 0;
}
not_pkcs7:
- pr_debug("Signature data not PKCS#7\n");
+ pr_warn("Signature data not PKCS#7\n");
return -ELIBBAD;
}
@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
digest_size = crypto_shash_digestsize(tfm);
if (digest_size != ctx->digest_len) {
- pr_debug("Digest size mismatch (%zx != %x)\n",
- digest_size, ctx->digest_len);
+ pr_warn("Digest size mismatch (%zx != %x)\n",
+ digest_size, ctx->digest_len);
ret = -EBADMSG;
goto error_no_desc;
}
@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
* PKCS#7 certificate.
*/
if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
- pr_debug("Digest mismatch\n");
+ pr_warn("Digest mismatch\n");
ret = -EKEYREJECTED;
} else {
pr_debug("The digests match!\n");
@@ -387,7 +391,7 @@ error_no_desc:
* verify_pefile_signature - Verify the signature on a PE binary image
* @pebuf: Buffer containing the PE binary image
* @pelen: Length of the binary image
- * @trust_keys: Signing certificate(s) to use as starting points
+ * @trusted_keys: Signing certificate(s) to use as starting points
* @usage: The use to which the key is being put.
*
* Validate that the certificate chain inside the PKCS#7 message inside the PE
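
A worked example of why the strict equality check above had to go (the values are illustrative):

    /* wrapper.length == 0x0eb4 in both binaries:
     *   sbsign pads the table:    ctx->sig_len == round_up(0x0eb4, 8) == 0x0eb8
     *   pesign >= 0.110 does not: ctx->sig_len == 0x0eb4
     * Old check: round_up(wrapper.length, 8) != ctx->sig_len
     *            -> rejected the pesign binary (0x0eb8 != 0x0eb4)
     * New check: wrapper.length > ctx->sig_len
     *            -> accepts both layouts
     */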
diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1
index 92d59c32f96a..feb9573cacce 100644
--- a/crypto/asymmetric_keys/x509.asn1
+++ b/crypto/asymmetric_keys/x509.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2008 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc5280#section-4
+
Certificate ::= SEQUENCE {
tbsCertificate TBSCertificate ({ x509_note_tbs_certificate }),
signatureAlgorithm AlgorithmIdentifier,
diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1
index 1a33231a75a8..0f8355cf1907 100644
--- a/crypto/asymmetric_keys/x509_akid.asn1
+++ b/crypto/asymmetric_keys/x509_akid.asn1
@@ -1,3 +1,8 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2008 IETF Trust and the persons identified as authors
+-- of the code
+--
-- X.509 AuthorityKeyIdentifier
-- rfc5280 section 4.2.1.1
@@ -14,15 +19,15 @@ CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial })
GeneralNames ::= SEQUENCE OF GeneralName
GeneralName ::= CHOICE {
- otherName [0] ANY,
- rfc822Name [1] IA5String,
- dNSName [2] IA5String,
+ otherName [0] IMPLICIT OtherName,
+ rfc822Name [1] IMPLICIT IA5String,
+ dNSName [2] IMPLICIT IA5String,
x400Address [3] ANY,
directoryName [4] Name ({ x509_akid_note_name }),
- ediPartyName [5] ANY,
- uniformResourceIdentifier [6] IA5String,
- iPAddress [7] OCTET STRING,
- registeredID [8] OBJECT IDENTIFIER
+ ediPartyName [5] IMPLICIT EDIPartyName,
+ uniformResourceIdentifier [6] IMPLICIT IA5String,
+ iPAddress [7] IMPLICIT OCTET STRING,
+ registeredID [8] IMPLICIT OBJECT IDENTIFIER
}
Name ::= SEQUENCE OF RelativeDistinguishedName
@@ -33,3 +38,13 @@ AttributeValueAssertion ::= SEQUENCE {
attributeType OBJECT IDENTIFIER ({ x509_note_OID }),
attributeValue ANY ({ x509_extract_name_segment })
}
+
+OtherName ::= SEQUENCE {
+ type-id OBJECT IDENTIFIER,
+ value [0] ANY
+ }
+
+EDIPartyName ::= SEQUENCE {
+ nameAssigner [0] ANY OPTIONAL,
+ partyName [1] ANY
+ }
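
The practical difference shows up on the wire: RFC 5280 GeneralName uses implicit tagging, so the context tag replaces the underlying type's tag rather than wrapping it. A hedged DER illustration for a dNSName of "a.b" (IA5String has tag 0x16):

    /* [2] IMPLICIT IA5String "a.b"  ->  82 03 61 2e 62
     *     (context tag 2 substitutes for the IA5String tag)
     * [2] EXPLICIT IA5String "a.b"  ->  a2 05 16 03 61 2e 62
     *     (context tag 2 wraps the whole IA5String TLV)
     */

The previous ANY declarations simply captured the raw element; spelling out the IMPLICIT tagging, and the OtherName/EDIPartyName shapes below, lets the generated parser validate the structure.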
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 7a9b084e2043..b37cae914987 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -60,24 +60,23 @@ EXPORT_SYMBOL_GPL(x509_free_certificate);
*/
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
- struct x509_certificate *cert;
- struct x509_parse_context *ctx;
+ struct x509_certificate *cert __free(x509_free_certificate) = NULL;
+ struct x509_parse_context *ctx __free(kfree) = NULL;
struct asymmetric_key_id *kid;
long ret;
- ret = -ENOMEM;
cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL);
if (!cert)
- goto error_no_cert;
+ return ERR_PTR(-ENOMEM);
cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
if (!cert->pub)
- goto error_no_ctx;
+ return ERR_PTR(-ENOMEM);
cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL);
if (!cert->sig)
- goto error_no_ctx;
+ return ERR_PTR(-ENOMEM);
ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL);
if (!ctx)
- goto error_no_ctx;
+ return ERR_PTR(-ENOMEM);
ctx->cert = cert;
ctx->data = (unsigned long)data;
@@ -85,7 +84,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
/* Attempt to decode the certificate */
ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen);
if (ret < 0)
- goto error_decode;
+ return ERR_PTR(ret);
/* Decode the AuthorityKeyIdentifier */
if (ctx->raw_akid) {
@@ -95,20 +94,19 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
ctx->raw_akid, ctx->raw_akid_size);
if (ret < 0) {
pr_warn("Couldn't decode AuthKeyIdentifier\n");
- goto error_decode;
+ return ERR_PTR(ret);
}
}
- ret = -ENOMEM;
cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
if (!cert->pub->key)
- goto error_decode;
+ return ERR_PTR(-ENOMEM);
cert->pub->keylen = ctx->key_size;
cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
if (!cert->pub->params)
- goto error_decode;
+ return ERR_PTR(-ENOMEM);
cert->pub->paramlen = ctx->params_size;
cert->pub->algo = ctx->key_algo;
@@ -116,33 +114,23 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
/* Grab the signature bits */
ret = x509_get_sig_params(cert);
if (ret < 0)
- goto error_decode;
+ return ERR_PTR(ret);
/* Generate cert issuer + serial number key ID */
kid = asymmetric_key_generate_id(cert->raw_serial,
cert->raw_serial_size,
cert->raw_issuer,
cert->raw_issuer_size);
- if (IS_ERR(kid)) {
- ret = PTR_ERR(kid);
- goto error_decode;
- }
+ if (IS_ERR(kid))
+ return ERR_CAST(kid);
cert->id = kid;
/* Detect self-signed certificates */
ret = x509_check_for_self_signed(cert);
if (ret < 0)
- goto error_decode;
-
- kfree(ctx);
- return cert;
+ return ERR_PTR(ret);
-error_decode:
- kfree(ctx);
-error_no_ctx:
- x509_free_certificate(cert);
-error_no_cert:
- return ERR_PTR(ret);
+ return_ptr(cert);
}
EXPORT_SYMBOL_GPL(x509_cert_parse);
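
x509_cert_parse() above is converted to the __free()/return_ptr() scope-based cleanup pattern from <linux/cleanup.h> (the matching DEFINE_FREE() appears in the x509_parser.h hunk further down). A minimal hedged sketch of the pattern, with a hypothetical is_wanted() standing in for the real validation steps:

    struct x509_certificate *cert __free(x509_free_certificate) = NULL;

    cert = x509_cert_parse(data, datalen);
    if (IS_ERR(cert))
            return ERR_CAST(cert);   /* guard skips ERR_PTRs, no free */
    if (!is_wanted(cert))            /* is_wanted() is hypothetical */
            return ERR_PTR(-ENOKEY); /* cert freed automatically here */
    return_ptr(cert);                /* ownership moves out; no free */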
@@ -195,15 +183,9 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
pr_debug("PubKey Algo: %u\n", ctx->last_oid);
switch (ctx->last_oid) {
- case OID_md2WithRSAEncryption:
- case OID_md3WithRSAEncryption:
default:
return -ENOPKG; /* Unsupported combination */
- case OID_md4WithRSAEncryption:
- ctx->cert->sig->hash_algo = "md4";
- goto rsa_pkcs1;
-
case OID_sha1WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha1";
goto rsa_pkcs1;
@@ -228,6 +210,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha1";
goto ecdsa;
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
+ ctx->cert->sig->hash_algo = "sha3-256";
+ goto rsa_pkcs1;
+
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_384:
+ ctx->cert->sig->hash_algo = "sha3-384";
+ goto rsa_pkcs1;
+
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_512:
+ ctx->cert->sig->hash_algo = "sha3-512";
+ goto rsa_pkcs1;
+
case OID_id_ecdsa_with_sha224:
ctx->cert->sig->hash_algo = "sha224";
goto ecdsa;
@@ -244,6 +238,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha512";
goto ecdsa;
+ case OID_id_ecdsa_with_sha3_256:
+ ctx->cert->sig->hash_algo = "sha3-256";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha3_384:
+ ctx->cert->sig->hash_algo = "sha3-384";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha3_512:
+ ctx->cert->sig->hash_algo = "sha3-512";
+ goto ecdsa;
+
case OID_gost2012Signature256:
ctx->cert->sig->hash_algo = "streebog256";
goto ecrdsa;
@@ -251,10 +257,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
case OID_gost2012Signature512:
ctx->cert->sig->hash_algo = "streebog512";
goto ecrdsa;
-
- case OID_SM2_with_SM3:
- ctx->cert->sig->hash_algo = "sm3";
- goto sm2;
}
rsa_pkcs1:
@@ -267,11 +269,6 @@ ecrdsa:
ctx->cert->sig->encoding = "raw";
ctx->sig_algo = ctx->last_oid;
return 0;
-sm2:
- ctx->cert->sig->pkey_algo = "sm2";
- ctx->cert->sig->encoding = "raw";
- ctx->sig_algo = ctx->last_oid;
- return 0;
ecdsa:
ctx->cert->sig->pkey_algo = "ecdsa";
ctx->cert->sig->encoding = "x962";
@@ -303,7 +300,6 @@ int x509_note_signature(void *context, size_t hdrlen,
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
- strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
@@ -376,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
/* Empty name string if no material */
if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
- buffer = kmalloc(1, GFP_KERNEL);
+ buffer = kzalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- buffer[0] = 0;
goto done;
}
@@ -508,17 +503,11 @@ int x509_extract_key_data(void *context, size_t hdrlen,
case OID_gost2012PKey512:
ctx->cert->pub->pkey_algo = "ecrdsa";
break;
- case OID_sm2:
- ctx->cert->pub->pkey_algo = "sm2";
- break;
case OID_id_ecPublicKey:
if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
return -EBADMSG;
switch (oid) {
- case OID_sm2:
- ctx->cert->pub->pkey_algo = "sm2";
- break;
case OID_id_prime192v1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
break;
@@ -528,6 +517,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,
case OID_id_ansip384r1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
break;
+ case OID_id_ansip521r1:
+ ctx->cert->pub->pkey_algo = "ecdsa-nist-p521";
+ break;
default:
return -ENOPKG;
}
@@ -579,6 +571,34 @@ int x509_process_extension(void *context, size_t hdrlen,
return 0;
}
+ if (ctx->last_oid == OID_keyUsage) {
+ /*
+ * Get hold of the keyUsage bit string
+ * v[1] is the encoding size
+ * (Expect either 0x02 or 0x03, making it 1 or 2 bytes)
+ * v[2] is the number of unused bits in the bit string
+ * (If >= 3 keyCertSign is missing when v[1] = 0x02)
+ * v[3] and possibly v[4] contain the bit string
+ *
+ * From RFC 5280 4.2.1.3:
+ * 0x04 is where keyCertSign lands in this bit string
+ * 0x80 is where digitalSignature lands in this bit string
+ */
+ if (v[0] != ASN1_BTS)
+ return -EBADMSG;
+ if (vlen < 4)
+ return -EBADMSG;
+ if (v[2] >= 8)
+ return -EBADMSG;
+ if (v[3] & 0x80)
+ ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_DIGITALSIG;
+ if (v[1] == 0x02 && v[2] <= 2 && (v[3] & 0x04))
+ ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN;
+ else if (vlen > 4 && v[1] == 0x03 && (v[3] & 0x04))
+ ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN;
+ return 0;
+ }
+
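
A worked example against the checks above, for a certificate whose keyUsage asserts both digitalSignature and keyCertSign:

    /* Extension value: 03 02 02 84
     *   v[0] = 0x03  ASN1_BTS
     *   v[1] = 0x02  bit-string contents are two bytes (unused-bits + bits)
     *   v[2] = 0x02  two unused trailing bits
     *   v[3] = 0x84  0x80 -> digitalSignature, 0x04 -> keyCertSign
     * Result: both KEY_EFLAG_DIGITALSIG and KEY_EFLAG_KEYCERTSIGN are set.
     */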
if (ctx->last_oid == OID_authorityKeyIdentifier) {
/* Get hold of the CA key fingerprint */
ctx->raw_akid = v;
@@ -586,6 +606,36 @@ int x509_process_extension(void *context, size_t hdrlen,
return 0;
}
+ if (ctx->last_oid == OID_basicConstraints) {
+ /*
+ * Get hold of the basicConstraints
+ * v[1] is the encoding size
+ * (Expect 0x00 for empty SEQUENCE with CA:FALSE, or
+ * 0x03 or greater for non-empty SEQUENCE)
+ * v[2] is the encoding type
+ * (Expect an ASN1_BOOL for the CA)
+ * v[3] is the length of the ASN1_BOOL
+ * (Expect 1 for a single byte boolean)
+ * v[4] is the contents of the ASN1_BOOL
+ * (Expect 0xFF if the CA is TRUE)
+ * vlen should match the entire extension size
+ */
+ if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ))
+ return -EBADMSG;
+ if (vlen < 2)
+ return -EBADMSG;
+ if (v[1] != vlen - 2)
+ return -EBADMSG;
+ /* Empty SEQUENCE means CA:FALSE (default value omitted per DER) */
+ if (v[1] == 0)
+ return 0;
+ if (vlen >= 5 && v[2] == ASN1_BOOL && v[3] == 1 && v[4] == 0xFF)
+ ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA;
+ else
+ return -EBADMSG;
+ return 0;
+ }
+
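
And the corresponding worked examples for the basicConstraints values:

    /* 30 00           -> empty SEQUENCE, CA:FALSE (flag stays clear)
     * 30 03 01 01 ff  -> SEQUENCE { BOOLEAN TRUE }, sets KEY_EFLAG_CA
     * 30 03 01 01 00  -> BOOLEAN FALSE spelled out, which DER forbids
     *                    for a DEFAULT FALSE field: rejected, -EBADMSG
     */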
return 0;
}
diff --git a/crypto/asymmetric_keys/x509_loader.c b/crypto/asymmetric_keys/x509_loader.c
index 1bc169dee22e..a41741326998 100644
--- a/crypto/asymmetric_keys/x509_loader.c
+++ b/crypto/asymmetric_keys/x509_loader.c
@@ -55,3 +55,4 @@ dodgy_cert:
pr_err("Problem parsing in-kernel X.509 certificate list\n");
return 0;
}
+EXPORT_SYMBOL_GPL(x509_load_certificate_list);
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index a299c9c56f40..0688c222806b 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -5,6 +5,7 @@
* Written by David Howells (dhowells@redhat.com)
*/
+#include <linux/cleanup.h>
#include <linux/time.h>
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
@@ -41,18 +42,11 @@ struct x509_certificate {
};
/*
- * selftest.c
- */
-#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST
-extern int __init fips_signature_selftest(void);
-#else
-static inline int fips_signature_selftest(void) { return 0; }
-#endif
-
-/*
* x509_cert_parser.c
*/
extern void x509_free_certificate(struct x509_certificate *cert);
+DEFINE_FREE(x509_free_certificate, struct x509_certificate *,
+ if (!IS_ERR(_T)) x509_free_certificate(_T))
extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
extern int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 0b4943a4592b..12e3341e806b 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -6,13 +6,14 @@
*/
#define pr_fmt(fmt) "X.509: "fmt
+#include <crypto/hash.h>
+#include <keys/asymmetric-parser.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/system_keyring.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <keys/asymmetric-subtype.h>
-#include <keys/asymmetric-parser.h>
-#include <keys/system_keyring.h>
-#include <crypto/hash.h>
+#include <linux/string.h>
#include "asymmetric_keys.h"
#include "x509_parser.h"
@@ -30,9 +31,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
pr_devel("==>%s()\n", __func__);
- sig->data = cert->tbs;
- sig->data_size = cert->tbs_size;
-
sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
if (!sig->s)
return -ENOMEM;
@@ -65,7 +63,9 @@ int x509_get_sig_params(struct x509_certificate *cert)
desc->tfm = tfm;
- ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
+ ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size,
+ sig->digest);
+
if (ret < 0)
goto error_2;
@@ -117,6 +117,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
goto out;
}
+ if (cert->unsupported_sig) {
+ ret = 0;
+ goto out;
+ }
+
ret = public_key_verify_signature(cert->pub, cert->sig);
if (ret < 0) {
if (ret == -ENOPKG) {
@@ -143,12 +148,11 @@ not_self_signed:
*/
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
- struct asymmetric_key_ids *kids;
- struct x509_certificate *cert;
+ struct x509_certificate *cert __free(x509_free_certificate) = NULL;
+ struct asymmetric_key_ids *kids __free(kfree) = NULL;
+ char *p, *desc __free(kfree) = NULL;
const char *q;
size_t srlen, sulen;
- char *desc = NULL, *p;
- int ret;
cert = x509_cert_parse(prep->data, prep->datalen);
if (IS_ERR(cert))
@@ -170,9 +174,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
}
/* Don't permit addition of blacklisted keys */
- ret = -EKEYREJECTED;
if (cert->blacklisted)
- goto error_free_cert;
+ return -EKEYREJECTED;
/* Propose a description */
sulen = strlen(cert->subject);
@@ -184,10 +187,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
q = cert->raw_serial;
}
- ret = -ENOMEM;
desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
if (!desc)
- goto error_free_cert;
+ return -ENOMEM;
p = memcpy(desc, cert->subject, sulen);
p += sulen;
*p++ = ':';
@@ -197,16 +199,14 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL);
if (!kids)
- goto error_free_desc;
+ return -ENOMEM;
kids->id[0] = cert->id;
kids->id[1] = cert->skid;
kids->id[2] = asymmetric_key_generate_id(cert->raw_subject,
cert->raw_subject_size,
"", 0);
- if (IS_ERR(kids->id[2])) {
- ret = PTR_ERR(kids->id[2]);
- goto error_free_kids;
- }
+ if (IS_ERR(kids->id[2]))
+ return PTR_ERR(kids->id[2]);
/* We're pinning the module by being linked against it */
__module_get(public_key_subtype.owner);
@@ -224,15 +224,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->sig = NULL;
desc = NULL;
kids = NULL;
- ret = 0;
-
-error_free_kids:
- kfree(kids);
-error_free_desc:
- kfree(desc);
-error_free_cert:
- x509_free_certificate(cert);
- return ret;
+ return 0;
}
static struct asymmetric_key_parser x509_key_parser = {
@@ -244,15 +236,9 @@ static struct asymmetric_key_parser x509_key_parser = {
/*
* Module stuff
*/
-extern int __init certs_selftest(void);
static int __init x509_key_init(void)
{
- int ret;
-
- ret = register_asymmetric_key_parser(&x509_key_parser);
- if (ret < 0)
- return ret;
- return fips_signature_selftest();
+ return register_asymmetric_key_parser(&x509_key_parser);
}
static void __exit x509_key_exit(void)
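[Editor's note: the x509_key_preparse() conversion above relies on scope-based cleanup from <linux/cleanup.h>: a pointer declared with __free(kfree) is handed to kfree() automatically when it leaves scope, which is why every error path can now simply return. A minimal sketch of the pattern follows; demo_preparse(), struct some_payload, and fill() are hypothetical stand-ins, while __free()/DEFINE_FREE(kfree, ...) are the real helpers from <linux/cleanup.h> and <linux/slab.h>.]

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int demo_preparse(struct some_payload *prep)	/* hypothetical */
	{
		char *desc __free(kfree) = kmalloc(32, GFP_KERNEL);

		if (!desc)
			return -ENOMEM;

		if (!fill(desc))		/* hypothetical; error paths just return */
			return -EINVAL;		/* desc is kfree()d automatically here */

		prep->description = desc;	/* transfer ownership ...            */
		desc = NULL;			/* ... so the scope exit frees NULL  */
		return 0;
	}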
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f9cdc5e91664..9e4bb7fbde25 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -11,8 +11,8 @@
#include <linux/async_tx.h>
#include <linux/gfp.h>
-/**
- * pq_scribble_page - space to hold throwaway P or Q buffer for
+/*
+ * struct pq_scribble_page - space to hold throwaway P or Q buffer for
* synchronous gen_syndrome
*/
static struct page *pq_scribble_page;
@@ -28,7 +28,7 @@ static struct page *pq_scribble_page;
#define MAX_DISKS 255
-/**
+/*
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
@@ -100,7 +100,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
return tx;
}
-/**
+/*
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
@@ -119,7 +119,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
for (i = 0; i < disks; i++) {
if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
- srcs[i] = (void*)raid6_empty_zero_page;
+ srcs[i] = raid6_get_zero_page();
} else {
srcs[i] = page_address(blocks[i]) + offsets[i];
@@ -281,7 +281,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 354b8cd5537f..539ea5b378dc 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -414,7 +414,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
- ptrs[i] = (void *) raid6_empty_zero_page;
+ ptrs[i] = raid6_get_zero_page();
else
ptrs[i] = page_address(blocks[i]) + offs[i];
@@ -497,7 +497,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
- ptrs[i] = (void*)raid6_empty_zero_page;
+ ptrs[i] = raid6_get_zero_page();
else
ptrs[i] = page_address(blocks[i]) + offs[i];
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 9256934312d7..ad72057a5e0d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -124,7 +124,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
- * submit_disposition - flags for routing an incoming operation
+ * enum submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(async_trigger_callback);
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
+ * @tx: transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
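[Editor's note: the comment churn in these async_tx hunks is kernel-doc hygiene: a comment opened with /** is parsed by scripts/kernel-doc and must document a real symbol, with parameter lines of the form "@name:", so free-form comments are demoted to plain /* and "@tx -" becomes "@tx:". Schematically, with frobnicate() as a made-up symbol:]

	/**
	 * frobnicate - one-line "name - summary" header required by kernel-doc
	 * @arg: parameter descriptions use "name:" followed by text
	 *
	 * Further prose may follow after a blank line. Comments that do not
	 * document a symbol should open with a plain comment marker instead.
	 */
	int frobnicate(int arg);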
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1a3855284091..2c499654a36c 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset,
}
EXPORT_SYMBOL_GPL(async_xor_val_offs);
-/**
- * async_xor_val - attempt a xor parity check with a dma engine.
- * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages
- * @offset: offset in pages to start transaction
- * @src_cnt: number of source pages
- * @len: length in bytes
- * @result: 0 if sum == 0 else non-zero
- * @submit: submission / completion modifiers
- *
- * honored flags: ASYNC_TX_ACK
- *
- * src_list note: if the dest is also a source it must be at index zero.
- * The contents of this array will be overwritten if a scribble region
- * is not specified.
- */
-struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum sum_check_flags *result,
- struct async_submit_ctl *submit)
-{
- return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt,
- len, result, submit);
-}
-EXPORT_SYMBOL_GPL(async_xor_val);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");
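[Editor's note: with the unused async_xor_val() wrapper deleted, a remaining caller checks parity through async_xor_val_offs() directly, passing NULL for the per-source offset array to get the old common-offset behaviour, exactly as the removed wrapper did:]

	/* equivalent of the removed async_xor_val(dest, srcs, offset,
	 * src_cnt, len, &result, submit): */
	tx = async_xor_val_offs(dest, offset, srcs, NULL,
				src_cnt, len, &result, submit);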
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 17f674a7cdff..ac679ce2cb95 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -9,7 +9,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -28,7 +27,6 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
@@ -39,7 +37,7 @@ struct authenc_request_ctx {
static void authenc_request_complete(struct aead_request *req, int err)
{
- if (err != -EINPROGRESS)
+ if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
}
@@ -109,27 +107,42 @@ out:
return err;
}
-static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
+static void authenc_geniv_ahash_finish(struct aead_request *req)
{
- struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
- if (err)
- goto out;
-
scatterwalk_map_and_copy(ahreq->result, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(authenc), 1);
+}
-out:
+static void authenc_geniv_ahash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (!err)
+ authenc_geniv_ahash_finish(req);
aead_request_complete(req, err);
}
-static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
+/*
+ * Used when the ahash request was invoked in the async callback context
+ * of the previous skcipher request. Eat any EINPROGRESS notifications.
+ */
+static void authenc_geniv_ahash_done2(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (!err)
+ authenc_geniv_ahash_finish(req);
+ authenc_request_complete(req, err);
+}
+
+static int crypto_authenc_genicv(struct aead_request *req, unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@@ -138,17 +151,16 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ unsigned int flags = aead_request_flags(req) & ~mask;
u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
- crypto_ahash_alignmask(auth) + 1);
-
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, req->dst, hash,
req->assoclen + req->cryptlen);
ahash_request_set_callback(ahreq, flags,
- authenc_geniv_ahash_done, req);
+ mask ? authenc_geniv_ahash_done2 :
+ authenc_geniv_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
@@ -160,35 +172,18 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
return 0;
}
-static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
- int err)
+static void crypto_authenc_encrypt_done(void *data, int err)
{
- struct aead_request *areq = req->data;
-
- if (err)
- goto out;
+ struct aead_request *areq = data;
- err = crypto_authenc_genicv(areq, 0);
-
-out:
+ if (err) {
+ aead_request_complete(areq, err);
+ return;
+ }
+ err = crypto_authenc_genicv(areq, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(areq, err);
}
-static int crypto_authenc_copy_assoc(struct aead_request *req)
-{
- struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
@@ -207,10 +202,7 @@ static int crypto_authenc_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_copy_assoc(req);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, req->assoclen);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
@@ -223,11 +215,18 @@ static int crypto_authenc_encrypt(struct aead_request *req)
if (err)
return err;
- return crypto_authenc_genicv(req, aead_request_flags(req));
+ return crypto_authenc_genicv(req, 0);
+}
+
+static void authenc_decrypt_tail_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ authenc_request_complete(req, err);
}
static int crypto_authenc_decrypt_tail(struct aead_request *req,
- unsigned int flags)
+ unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@@ -238,6 +237,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ictx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int flags = aead_request_flags(req) & ~mask;
u8 *ihash = ahreq->result + authsize;
struct scatterlist *src, *dst;
@@ -254,24 +254,24 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
- req->base.complete, req->base.data);
+ mask ? authenc_decrypt_tail_done :
+ req->base.complete,
+ mask ? req : req->base.data);
skcipher_request_set_crypt(skreq, src, dst,
req->cryptlen - authsize, req->iv);
return crypto_skcipher_decrypt(skreq);
}
-static void authenc_verify_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_verify_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
-
- if (err)
- goto out;
-
- err = crypto_authenc_decrypt_tail(req, 0);
+ struct aead_request *req = data;
-out:
+ if (err) {
+ aead_request_complete(req, err);
+ return;
+ }
+ err = crypto_authenc_decrypt_tail(req, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(req, err);
}
@@ -288,9 +288,6 @@ static int crypto_authenc_decrypt(struct aead_request *req)
u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
- crypto_ahash_alignmask(auth) + 1);
-
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, req->src, hash,
req->assoclen + req->cryptlen - authsize);
@@ -301,7 +298,7 @@ static int crypto_authenc_decrypt(struct aead_request *req)
if (err)
return err;
- return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
+ return crypto_authenc_decrypt_tail(req, 0);
}
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
@@ -311,7 +308,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -323,14 +319,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
crypto_aead_set_reqsize(
tfm,
@@ -344,8 +334,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -357,7 +345,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
@@ -375,9 +362,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
u32 mask;
struct aead_instance *inst;
struct authenc_instance_ctx *ctx;
+ struct skcipher_alg_common *enc;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
- struct skcipher_alg *enc;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
@@ -400,10 +387,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
- enc = crypto_spawn_skcipher_alg(&ctx->enc);
+ enc = crypto_spawn_skcipher_alg_common(&ctx->enc);
- ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
- auth_base->cra_alignmask + 1);
+ ctx->reqoff = 2 * auth->digestsize;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -420,12 +406,11 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
- inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- enc->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_init_tfm;
@@ -461,7 +446,7 @@ static void __exit crypto_authenc_module_exit(void)
crypto_unregister_template(&crypto_authenc_tmpl);
}
-subsys_initcall(crypto_authenc_module_init);
+module_init(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
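[Editor's note: two API shifts drive most of this authenc rework. First, crypto completion callbacks now receive the completion data directly instead of a struct crypto_async_request, so the boilerplate shrinks to a cast; old versus new, schematically:]

	/* old convention: data recovered via the async request */
	static void done_old(struct crypto_async_request *areq, int err)
	{
		struct aead_request *req = areq->data;

		aead_request_complete(req, err);
	}

	/* new convention: completion data passed straight through */
	static void done_new(void *data, int err)
	{
		struct aead_request *req = data;

		aead_request_complete(req, err);
	}

[Second, copying the associated data between distinct src/dst scatterlists no longer needs a dummy "null cipher" round-trip: memcpy_sglist(dst, src, len) replaces the removed crypto_authenc_copy_assoc() helper outright.]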
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index b60e61b1904c..d1bf0fda3f2e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -12,7 +12,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -87,11 +85,8 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_ahash *auth = ctx->auth;
- u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *hash = areq_ctx->tail;
unsigned int authsize = crypto_aead_authsize(authenc_esn);
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
@@ -107,10 +102,9 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
return 0;
}
-static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_esn_geniv_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
aead_request_complete(req, err);
@@ -123,8 +117,7 @@ static int crypto_authenc_esn_genicv(struct aead_request *req,
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
- u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *hash = areq_ctx->tail;
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
unsigned int assoclen = req->assoclen;
@@ -153,10 +146,9 @@ static int crypto_authenc_esn_genicv(struct aead_request *req,
}
-static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
- int err)
+static void crypto_authenc_esn_encrypt_done(void *data, int err)
{
- struct aead_request *areq = req->data;
+ struct aead_request *areq = data;
if (!err)
err = crypto_authenc_esn_genicv(areq, 0);
@@ -164,20 +156,6 @@ static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
authenc_esn_request_complete(areq, err);
}
-static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
-{
- struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
@@ -196,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_esn_copy(req, assoclen);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, assoclen);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
@@ -226,8 +201,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ctx->reqoff);
struct crypto_ahash *auth = ctx->auth;
- u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *ohash = areq_ctx->tail;
unsigned int cryptlen = req->cryptlen - authsize;
unsigned int assoclen = req->assoclen;
struct scatterlist *dst = req->dst;
@@ -258,10 +232,9 @@ decrypt:
return crypto_skcipher_decrypt(skreq);
}
-static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_esn_verify_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
authenc_esn_request_complete(req, err);
@@ -275,8 +248,7 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
- u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *ohash = areq_ctx->tail;
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
@@ -286,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
cryptlen -= authsize;
- if (req->src != dst) {
- err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
- if (err)
- return err;
- }
+ if (req->src != dst)
+ memcpy_sglist(dst, req->src, assoclen + cryptlen);
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
@@ -326,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -338,17 +306,10 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
- ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
- crypto_ahash_alignmask(auth) + 1);
+ ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
crypto_aead_set_reqsize(
tfm,
@@ -362,8 +323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -375,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -393,9 +351,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
u32 mask;
struct aead_instance *inst;
struct authenc_esn_instance_ctx *ctx;
+ struct skcipher_alg_common *enc;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
- struct skcipher_alg *enc;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
@@ -418,7 +376,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
- enc = crypto_spawn_skcipher_alg(&ctx->enc);
+ enc = crypto_spawn_skcipher_alg_common(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -434,12 +392,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
- inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- enc->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_esn_init_tfm;
@@ -476,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
-subsys_initcall(crypto_authenc_esn_module_init);
+module_init(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
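[Editor's note: the same alignmask retirement shows up here. ahash no longer imposes an alignment on its result buffer, so the two digests sit back to back at the start of areq_ctx->tail, with the nested sub-requests after them; this layout is implied by the ihash/ohash arithmetic and reqoff computation in the hunks above:]

	/* layout of areq_ctx->tail, with ds = crypto_ahash_digestsize(auth):
	 *
	 *   tail[0    .. ds-1  ]  ohash: the MAC we compute
	 *   tail[ds   .. 2*ds-1]  ihash: the MAC read from the ciphertext
	 *   tail[2*ds .. ]        ahash/skcipher sub-requests
	 */
	ctx->reqoff = 2 * crypto_ahash_digestsize(auth);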
diff --git a/crypto/blake2b.c b/crypto/blake2b.c
new file mode 100644
index 000000000000..67a6dae43a54
--- /dev/null
+++ b/crypto/blake2b.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for BLAKE2b
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <crypto/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+struct blake2b_tfm_ctx {
+ unsigned int keylen;
+ u8 key[BLAKE2B_KEY_SIZE];
+};
+
+static int crypto_blake2b_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen > BLAKE2B_KEY_SIZE)
+ return -EINVAL;
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+ return 0;
+}
+
+#define BLAKE2B_CTX(desc) ((struct blake2b_ctx *)shash_desc_ctx(desc))
+
+static int crypto_blake2b_init(struct shash_desc *desc)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ unsigned int digestsize = crypto_shash_digestsize(desc->tfm);
+
+ blake2b_init_key(BLAKE2B_CTX(desc), digestsize,
+ tctx->key, tctx->keylen);
+ return 0;
+}
+
+static int crypto_blake2b_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ blake2b_update(BLAKE2B_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_blake2b_final(struct shash_desc *desc, u8 *out)
+{
+ blake2b_final(BLAKE2B_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_blake2b_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ unsigned int digestsize = crypto_shash_digestsize(desc->tfm);
+
+ blake2b(tctx->key, tctx->keylen, data, len, out, digestsize);
+ return 0;
+}
+
+#define BLAKE2B_ALG(name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = name "-lib", \
+ .base.cra_priority = 300, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2b_setkey, \
+ .init = crypto_blake2b_init, \
+ .update = crypto_blake2b_update, \
+ .final = crypto_blake2b_final, \
+ .digest = crypto_blake2b_digest, \
+ .descsize = sizeof(struct blake2b_ctx), \
+ }
+
+static struct shash_alg algs[] = {
+ BLAKE2B_ALG("blake2b-160", BLAKE2B_160_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-256", BLAKE2B_256_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-384", BLAKE2B_384_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-512", BLAKE2B_512_HASH_SIZE),
+};
+
+static int __init crypto_blake2b_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_blake2b_mod_init);
+
+static void __exit crypto_blake2b_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_blake2b_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for BLAKE2b");
+
+MODULE_ALIAS_CRYPTO("blake2b-160");
+MODULE_ALIAS_CRYPTO("blake2b-160-lib");
+MODULE_ALIAS_CRYPTO("blake2b-256");
+MODULE_ALIAS_CRYPTO("blake2b-256-lib");
+MODULE_ALIAS_CRYPTO("blake2b-384");
+MODULE_ALIAS_CRYPTO("blake2b-384-lib");
+MODULE_ALIAS_CRYPTO("blake2b-512");
+MODULE_ALIAS_CRYPTO("blake2b-512-lib");
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
deleted file mode 100644
index 6704c0355889..000000000000
--- a/crypto/blake2b_generic.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
-/*
- * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b
- * reference implementation, but it has been heavily modified for use in the
- * kernel. The reference implementation was:
- *
- * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under
- * the terms of the CC0, the OpenSSL Licence, or the Apache Public License
- * 2.0, at your option. The terms of these licenses can be found at:
- *
- * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- * - OpenSSL license : https://www.openssl.org/source/license.html
- * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
- *
- * More information about BLAKE2 can be found at https://blake2.net.
- */
-
-#include <asm/unaligned.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <crypto/internal/blake2b.h>
-#include <crypto/internal/hash.h>
-
-static const u8 blake2b_sigma[12][16] = {
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
- { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
- { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
- { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
- { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
- { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
- { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
- { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
- { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
-};
-
-static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
-{
- S->t[0] += inc;
- S->t[1] += (S->t[0] < inc);
-}
-
-#define G(r,i,a,b,c,d) \
- do { \
- a = a + b + m[blake2b_sigma[r][2*i+0]]; \
- d = ror64(d ^ a, 32); \
- c = c + d; \
- b = ror64(b ^ c, 24); \
- a = a + b + m[blake2b_sigma[r][2*i+1]]; \
- d = ror64(d ^ a, 16); \
- c = c + d; \
- b = ror64(b ^ c, 63); \
- } while (0)
-
-#define ROUND(r) \
- do { \
- G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
- G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
- G(r,2,v[ 2],v[ 6],v[10],v[14]); \
- G(r,3,v[ 3],v[ 7],v[11],v[15]); \
- G(r,4,v[ 0],v[ 5],v[10],v[15]); \
- G(r,5,v[ 1],v[ 6],v[11],v[12]); \
- G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
- G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
- } while (0)
-
-static void blake2b_compress_one_generic(struct blake2b_state *S,
- const u8 block[BLAKE2B_BLOCK_SIZE])
-{
- u64 m[16];
- u64 v[16];
- size_t i;
-
- for (i = 0; i < 16; ++i)
- m[i] = get_unaligned_le64(block + i * sizeof(m[i]));
-
- for (i = 0; i < 8; ++i)
- v[i] = S->h[i];
-
- v[ 8] = BLAKE2B_IV0;
- v[ 9] = BLAKE2B_IV1;
- v[10] = BLAKE2B_IV2;
- v[11] = BLAKE2B_IV3;
- v[12] = BLAKE2B_IV4 ^ S->t[0];
- v[13] = BLAKE2B_IV5 ^ S->t[1];
- v[14] = BLAKE2B_IV6 ^ S->f[0];
- v[15] = BLAKE2B_IV7 ^ S->f[1];
-
- ROUND(0);
- ROUND(1);
- ROUND(2);
- ROUND(3);
- ROUND(4);
- ROUND(5);
- ROUND(6);
- ROUND(7);
- ROUND(8);
- ROUND(9);
- ROUND(10);
- ROUND(11);
-#ifdef CONFIG_CC_IS_CLANG
-#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */
-#endif
- for (i = 0; i < 8; ++i)
- S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
-}
-
-#undef G
-#undef ROUND
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc)
-{
- do {
- blake2b_increment_counter(state, inc);
- blake2b_compress_one_generic(state, block);
- block += BLAKE2B_BLOCK_SIZE;
- } while (--nblocks);
-}
-EXPORT_SYMBOL(blake2b_compress_generic);
-
-static int crypto_blake2b_update_generic(struct shash_desc *desc,
- const u8 *in, unsigned int inlen)
-{
- return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic);
-}
-
-static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
-{
- return crypto_blake2b_final(desc, out, blake2b_compress_generic);
-}
-
-#define BLAKE2B_ALG(name, driver_name, digest_size) \
- { \
- .base.cra_name = name, \
- .base.cra_driver_name = driver_name, \
- .base.cra_priority = 100, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
- .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
- .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
- .base.cra_module = THIS_MODULE, \
- .digestsize = digest_size, \
- .setkey = crypto_blake2b_setkey, \
- .init = crypto_blake2b_init, \
- .update = crypto_blake2b_update_generic, \
- .final = crypto_blake2b_final_generic, \
- .descsize = sizeof(struct blake2b_state), \
- }
-
-static struct shash_alg blake2b_algs[] = {
- BLAKE2B_ALG("blake2b-160", "blake2b-160-generic",
- BLAKE2B_160_HASH_SIZE),
- BLAKE2B_ALG("blake2b-256", "blake2b-256-generic",
- BLAKE2B_256_HASH_SIZE),
- BLAKE2B_ALG("blake2b-384", "blake2b-384-generic",
- BLAKE2B_384_HASH_SIZE),
- BLAKE2B_ALG("blake2b-512", "blake2b-512-generic",
- BLAKE2B_512_HASH_SIZE),
-};
-
-static int __init blake2b_mod_init(void)
-{
- return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
-}
-
-static void __exit blake2b_mod_fini(void)
-{
- crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
-}
-
-subsys_initcall(blake2b_mod_init);
-module_exit(blake2b_mod_fini);
-
-MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
-MODULE_DESCRIPTION("BLAKE2b generic implementation");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("blake2b-160");
-MODULE_ALIAS_CRYPTO("blake2b-160-generic");
-MODULE_ALIAS_CRYPTO("blake2b-256");
-MODULE_ALIAS_CRYPTO("blake2b-256-generic");
-MODULE_ALIAS_CRYPTO("blake2b-384");
-MODULE_ALIAS_CRYPTO("blake2b-384-generic");
-MODULE_ALIAS_CRYPTO("blake2b-512");
-MODULE_ALIAS_CRYPTO("blake2b-512-generic");
diff --git a/crypto/blowfish_common.c b/crypto/blowfish_common.c
index 1c072012baff..c0208ce269a3 100644
--- a/crypto/blowfish_common.c
+++ b/crypto/blowfish_common.c
@@ -14,11 +14,12 @@
* Copyright (c) Kyle McMartin <kyle@debian.org>
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*/
+
+#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
-#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 003b52c6880e..f3c5f9b09850 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -11,11 +11,12 @@
* Copyright (c) Kyle McMartin <kyle@debian.org>
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*/
+
+#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/unaligned.h>
-#include <linux/crypto.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
@@ -123,7 +124,7 @@ static void __exit blowfish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(blowfish_mod_init);
+module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c
new file mode 100644
index 000000000000..a88798d3e8c8
--- /dev/null
+++ b/crypto/bpf_crypto_skcipher.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Meta, Inc */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/bpf_crypto.h>
+#include <crypto/skcipher.h>
+
+static void *bpf_crypto_lskcipher_alloc_tfm(const char *algo)
+{
+ return crypto_alloc_lskcipher(algo, 0, 0);
+}
+
+static void bpf_crypto_lskcipher_free_tfm(void *tfm)
+{
+ crypto_free_lskcipher(tfm);
+}
+
+static int bpf_crypto_lskcipher_has_algo(const char *algo)
+{
+ return crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_LSKCIPHER, CRYPTO_ALG_TYPE_MASK);
+}
+
+static int bpf_crypto_lskcipher_setkey(void *tfm, const u8 *key, unsigned int keylen)
+{
+ return crypto_lskcipher_setkey(tfm, key, keylen);
+}
+
+static u32 bpf_crypto_lskcipher_get_flags(void *tfm)
+{
+ return crypto_lskcipher_get_flags(tfm);
+}
+
+static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm)
+{
+ return crypto_lskcipher_ivsize(tfm);
+}
+
+static unsigned int bpf_crypto_lskcipher_statesize(void *tfm)
+{
+ return crypto_lskcipher_statesize(tfm);
+}
+
+static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst,
+ unsigned int len, u8 *siv)
+{
+ return crypto_lskcipher_encrypt(tfm, src, dst, len, siv);
+}
+
+static int bpf_crypto_lskcipher_decrypt(void *tfm, const u8 *src, u8 *dst,
+ unsigned int len, u8 *siv)
+{
+ return crypto_lskcipher_decrypt(tfm, src, dst, len, siv);
+}
+
+static const struct bpf_crypto_type bpf_crypto_lskcipher_type = {
+ .alloc_tfm = bpf_crypto_lskcipher_alloc_tfm,
+ .free_tfm = bpf_crypto_lskcipher_free_tfm,
+ .has_algo = bpf_crypto_lskcipher_has_algo,
+ .setkey = bpf_crypto_lskcipher_setkey,
+ .encrypt = bpf_crypto_lskcipher_encrypt,
+ .decrypt = bpf_crypto_lskcipher_decrypt,
+ .ivsize = bpf_crypto_lskcipher_ivsize,
+ .statesize = bpf_crypto_lskcipher_statesize,
+ .get_flags = bpf_crypto_lskcipher_get_flags,
+ .owner = THIS_MODULE,
+ .name = "skcipher",
+};
+
+static int __init bpf_crypto_skcipher_init(void)
+{
+ return bpf_crypto_register_type(&bpf_crypto_lskcipher_type);
+}
+
+static void __exit bpf_crypto_skcipher_exit(void)
+{
+ int err = bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type);
+ WARN_ON_ONCE(err);
+}
+
+module_init(bpf_crypto_skcipher_init);
+module_exit(bpf_crypto_skcipher_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher support for BPF");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index fd1a88af9e77..ee4336a04b93 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -9,13 +9,13 @@
* https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
*/
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitops.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static const u32 camellia_sp1110[256] = {
0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00,
@@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void)
crypto_unregister_alg(&camellia_alg);
}
-subsys_initcall(camellia_init);
+module_init(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 0257c14cefc2..f68330793e0c 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -13,9 +13,9 @@
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
#include <linux/init.h>
-#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast5_mod_init);
+module_init(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 75346380aa0b..4c08c42646f0 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -10,9 +10,9 @@
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
#include <linux/init.h>
-#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast6_mod_init);
+module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast_common.c b/crypto/cast_common.c
index 9b2f60fd4cef..fec1f6609a40 100644
--- a/crypto/cast_common.c
+++ b/crypto/cast_common.c
@@ -282,4 +282,5 @@ __visible const u32 cast_s4[256] = {
};
EXPORT_SYMBOL_GPL(cast_s4);
+MODULE_DESCRIPTION("Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6)");
MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 6c03e96b945f..ed3df6246765 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -5,8 +5,6 @@
* Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -14,99 +12,72 @@
#include <linux/log2.h>
#include <linux/module.h>
-static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst, unsigned nbytes,
+ u8 *iv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_encrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
- do {
+ for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) {
crypto_xor(iv, src, bsize);
- fn(tfm, dst, iv);
+ crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL);
memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
+ }
return nbytes;
}
-static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm,
+ u8 *src, unsigned nbytes, u8 *oiv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_encrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+ u8 *iv = oiv;
+
+ if (nbytes < bsize)
+ goto out;
do {
crypto_xor(src, iv, bsize);
- fn(tfm, src, src);
+ crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
+ memcpy(oiv, iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_encrypt(struct skcipher_request *req)
+static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags)
{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL;
+ struct crypto_lskcipher *cipher = *ctx;
+ int rem;
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_encrypt_inplace(&walk, skcipher);
- else
- err = crypto_cbc_encrypt_segment(&walk, skcipher);
- err = skcipher_walk_done(&walk, err);
- }
+ if (src == dst)
+ rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv);
+ else
+ rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv);
- return err;
+ return rem && final ? -EINVAL : rem;
}
-static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst, unsigned nbytes,
+ u8 *oiv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_decrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+ const u8 *iv = oiv;
+
+ if (nbytes < bsize)
+ goto out;
do {
- fn(tfm, dst, src);
+ crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL);
crypto_xor(dst, iv, bsize);
iv = src;
@@ -114,83 +85,76 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
+ memcpy(oiv, iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm,
+ u8 *src, unsigned nbytes, u8 *iv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
u8 last_iv[MAX_CIPHER_BLOCKSIZE];
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_decrypt;
+ if (nbytes < bsize)
+ goto out;
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
memcpy(last_iv, src, bsize);
for (;;) {
- fn(tfm, src, src);
+ crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL);
if ((nbytes -= bsize) < bsize)
break;
crypto_xor(src, src - bsize, bsize);
src -= bsize;
}
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
+ crypto_xor(src, iv, bsize);
+ memcpy(iv, last_iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_decrypt(struct skcipher_request *req)
+static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags)
{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL;
+ struct crypto_lskcipher *cipher = *ctx;
+ int rem;
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_decrypt_inplace(&walk, skcipher);
- else
- err = crypto_cbc_decrypt_segment(&walk, skcipher);
- err = skcipher_walk_done(&walk, err);
- }
+ if (src == dst)
+ rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv);
+ else
+ rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv);
- return err;
+ return rem && final ? -EINVAL : rem;
}
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
+ struct lskcipher_instance *inst;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb);
+ inst = lskcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
- alg = skcipher_ialg_simple(inst);
-
err = -EINVAL;
- if (!is_power_of_2(alg->cra_blocksize))
+ if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
+ goto out_free_inst;
+
+ if (inst->alg.co.statesize)
goto out_free_inst;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
- err = skcipher_register_instance(tmpl, inst);
+ err = lskcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
@@ -215,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void)
crypto_unregister_template(&crypto_cbc_tmpl);
}
-subsys_initcall(crypto_cbc_module_init);
+module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
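[Editor's note: the lskcipher conversion keeps the textbook CBC dataflow. With E() the underlying single-block cipher, encryption is C[i] = E(P[i] ^ C[i-1]) with C[-1] = IV, which is exactly what the segment loop above computes while carrying C[i-1] in iv. Note the new return convention too: an lskcipher hook returns the count of unprocessed trailing bytes, and a nonzero remainder only becomes -EINVAL once CRYPTO_LSKCIPHER_FLAG_FINAL says no more data is coming. A reference loop in plain C, with encrypt_block() as a hypothetical stand-in for E():]

	/* CBC encryption over whole blocks (sketch, not kernel code) */
	while (nbytes >= bsize) {
		for (i = 0; i < bsize; i++)
			iv[i] ^= src[i];	/* P[i] ^ C[i-1]          */
		encrypt_block(dst, iv);		/* C[i] = E(P[i] ^ C[i-1])*/
		memcpy(iv, dst, bsize);		/* chain C[i] forward     */
		src += bsize;
		dst += bsize;
		nbytes -= bsize;
	}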
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 6b815ece51c6..2ae929ffdef8 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -10,11 +10,12 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
@@ -54,10 +55,6 @@ struct cbcmac_tfm_ctx {
struct crypto_cipher *child;
};
-struct cbcmac_desc_ctx {
- unsigned int len;
-};
-
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
struct aead_request *req)
{
@@ -218,15 +215,15 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
cryptlen += ilen;
}
- ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen);
+ ahash_request_set_crypt(ahreq, plain, odata, cryptlen);
err = crypto_ahash_finup(ahreq);
out:
return err;
}
-static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_ccm_encrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
u8 *odata = pctx->odata;
@@ -320,10 +317,9 @@ static int crypto_ccm_encrypt(struct aead_request *req)
return err;
}
-static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
- int err)
+static void crypto_ccm_decrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
@@ -448,10 +444,10 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *mac_name)
{
+ struct skcipher_alg_common *ctr;
u32 mask;
struct aead_instance *inst;
struct ccm_instance_ctx *ictx;
- struct skcipher_alg *ctr;
struct hash_alg_common *mac;
int err;
@@ -479,13 +475,12 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ctr_name, 0, mask);
if (err)
goto err_free_inst;
- ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
+ ctr = crypto_spawn_skcipher_alg_common(&ictx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
- crypto_skcipher_alg_ivsize(ctr) != 16 ||
- ctr->base.cra_blocksize != 1)
+ ctr->ivsize != 16 || ctr->base.cra_blocksize != 1)
goto err_free_inst;
/* ctr and cbcmac must use the same underlying block cipher. */
@@ -505,10 +500,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = mac->base.cra_alignmask |
- ctr->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = ctr->base.cra_alignmask;
inst->alg.ivsize = 16;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
+ inst->alg.chunksize = ctr->chunksize;
inst->alg.maxauthsize = 16;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
inst->alg.init = crypto_ccm_init_tfm;
@@ -785,13 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cbcmac_digest_init(struct shash_desc *pdesc)
{
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_digestsize(pdesc->tfm);
- u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs;
+ u8 *dg = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(dg, 0, bs);
-
return 0;
}
@@ -800,40 +791,33 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
- u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs;
-
- while (len > 0) {
- unsigned int l = min(len, bs - ctx->len);
-
- crypto_xor(dg + ctx->len, p, l);
- ctx->len +=l;
- len -= l;
- p += l;
-
- if (ctx->len == bs) {
- crypto_cipher_encrypt_one(tfm, dg, dg);
- ctx->len = 0;
- }
- }
+ u8 *dg = shash_desc_ctx(pdesc);
- return 0;
+ do {
+ crypto_xor(dg, p, bs);
+ crypto_cipher_encrypt_one(tfm, dg, dg);
+ p += bs;
+ len -= bs;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
- u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs;
-
- if (ctx->len)
- crypto_cipher_encrypt_one(tfm, dg, dg);
+ u8 *dg = shash_desc_ctx(pdesc);
+ if (len) {
+ crypto_xor(dg, src, len);
+ crypto_cipher_encrypt_one(tfm, out, dg);
+ return 0;
+ }
memcpy(out, dg, bs);
return 0;
}
@@ -888,20 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx),
- alg->cra_alignmask + 1) +
- alg->cra_blocksize;
+ inst->alg.descsize = alg->cra_blocksize;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY;
inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
inst->alg.base.cra_init = cbcmac_init_tfm;
inst->alg.base.cra_exit = cbcmac_exit_tfm;
inst->alg.init = crypto_cbcmac_digest_init;
inst->alg.update = crypto_cbcmac_digest_update;
- inst->alg.final = crypto_cbcmac_digest_final;
+ inst->alg.finup = crypto_cbcmac_digest_finup;
inst->alg.setkey = crypto_cbcmac_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -946,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void)
ARRAY_SIZE(crypto_ccm_tmpls));
}
-subsys_initcall(crypto_ccm_module_init);
+module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
@@ -955,4 +938,4 @@ MODULE_ALIAS_CRYPTO("ccm_base");
MODULE_ALIAS_CRYPTO("rfc4309");
MODULE_ALIAS_CRYPTO("ccm");
MODULE_ALIAS_CRYPTO("cbcmac");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
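[Editor's note: the cbcmac rewrite leans on the block-only ahash mode. With CRYPTO_AHASH_ALG_BLOCK_ONLY set, the hunks above suggest the core buffers partial blocks itself: update() only ever consumes full blocks and returns the residue it left behind, while finup() absorbs the final partial block. The MAC itself stays classic CBC-MAC, a running state XORed with each block and re-encrypted:]

	/* CBC-MAC core: dg starts as all zeroes (see digest_init) */
	do {
		crypto_xor(dg, p, bs);			/* fold block in */
		crypto_cipher_encrypt_one(tfm, dg, dg);	/* re-encrypt    */
		p += bs;
		len -= bs;
	} while (len >= bs);
	return len;	/* leftover bytes; the core buffers them */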
diff --git a/crypto/cfb.c b/crypto/cfb.c
deleted file mode 100644
index 5c36b7b65e2a..000000000000
--- a/crypto/cfb.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * CFB: Cipher FeedBack mode
- *
- * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
- *
- * CFB is a stream cipher mode which is layered on to a block
- * encryption scheme. It works very much like a one time pad where
- * the pad is generated initially from the encrypted IV and then
- * subsequently from the encrypted previous block of ciphertext. The
- * pad is XOR'd into the plain text to get the final ciphertext.
- *
- * The scheme of CFB is best described by wikipedia:
- *
- * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
- *
- * Note that since the pad for both encryption and decryption is
- * generated by an encryption operation, CFB never uses the block
- * decryption function.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
-{
- return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
-}
-
-static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
- const u8 *src, u8 *dst)
-{
- crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
-}
-
-/* final encrypt and decrypt is the same */
-static void crypto_cfb_final(struct skcipher_walk *walk,
- struct crypto_skcipher *tfm)
-{
- const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
- u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
- u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
- unsigned int nbytes = walk->nbytes;
-
- crypto_cfb_encrypt_one(tfm, iv, stream);
- crypto_xor_cpy(dst, stream, src, nbytes);
-}
-
-static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *tfm)
-{
- const unsigned int bsize = crypto_cfb_bsize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_cfb_encrypt_one(tfm, iv, dst);
- crypto_xor(dst, src, bsize);
- iv = dst;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *tfm)
-{
- const unsigned int bsize = crypto_cfb_bsize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
- u8 tmp[MAX_CIPHER_BLOCKSIZE];
-
- do {
- crypto_cfb_encrypt_one(tfm, iv, tmp);
- crypto_xor(src, tmp, bsize);
- iv = src;
-
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static int crypto_cfb_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- unsigned int bsize = crypto_cfb_bsize(tfm);
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes >= bsize) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cfb_encrypt_inplace(&walk, tfm);
- else
- err = crypto_cfb_encrypt_segment(&walk, tfm);
- err = skcipher_walk_done(&walk, err);
- }
-
- if (walk.nbytes) {
- crypto_cfb_final(&walk, tfm);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
-}
-
-static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *tfm)
-{
- const unsigned int bsize = crypto_cfb_bsize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_cfb_encrypt_one(tfm, iv, dst);
- crypto_xor(dst, src, bsize);
- iv = src;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *tfm)
-{
- const unsigned int bsize = crypto_cfb_bsize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 * const iv = walk->iv;
- u8 tmp[MAX_CIPHER_BLOCKSIZE];
-
- do {
- crypto_cfb_encrypt_one(tfm, iv, tmp);
- memcpy(iv, src, bsize);
- crypto_xor(src, tmp, bsize);
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- return nbytes;
-}
-
-static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
- struct crypto_skcipher *tfm)
-{
- if (walk->src.virt.addr == walk->dst.virt.addr)
- return crypto_cfb_decrypt_inplace(walk, tfm);
- else
- return crypto_cfb_decrypt_segment(walk, tfm);
-}
-
-static int crypto_cfb_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- const unsigned int bsize = crypto_cfb_bsize(tfm);
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes >= bsize) {
- err = crypto_cfb_decrypt_blocks(&walk, tfm);
- err = skcipher_walk_done(&walk, err);
- }
-
- if (walk.nbytes) {
- crypto_cfb_final(&walk, tfm);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
-}
-
-static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
- int err;
-
- inst = skcipher_alloc_instance_simple(tmpl, tb);
- if (IS_ERR(inst))
- return PTR_ERR(inst);
-
- alg = skcipher_ialg_simple(inst);
-
- /* CFB mode is a stream cipher. */
- inst->alg.base.cra_blocksize = 1;
-
- /*
- * To simplify the implementation, configure the skcipher walk to only
- * give a partial block at the very end, never earlier.
- */
- inst->alg.chunksize = alg->cra_blocksize;
-
- inst->alg.encrypt = crypto_cfb_encrypt;
- inst->alg.decrypt = crypto_cfb_decrypt;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err)
- inst->free(inst);
-
- return err;
-}
-
-static struct crypto_template crypto_cfb_tmpl = {
- .name = "cfb",
- .create = crypto_cfb_create,
- .module = THIS_MODULE,
-};
-
-static int __init crypto_cfb_module_init(void)
-{
- return crypto_register_template(&crypto_cfb_tmpl);
-}
-
-static void __exit crypto_cfb_module_exit(void)
-{
- crypto_unregister_template(&crypto_cfb_tmpl);
-}
-
-subsys_initcall(crypto_cfb_module_init);
-module_exit(crypto_cfb_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CFB block cipher mode of operation");
-MODULE_ALIAS_CRYPTO("cfb");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
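
For reference, the mode the deleted file implemented reduces to a few lines. A minimal standalone sketch of the CFB dataflow, assuming a 16-byte block cipher behind a hypothetical block_encrypt() callback (not a kernel API):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef void (*block_encrypt_fn)(const void *key,
				 const uint8_t in[16], uint8_t out[16]);

/* pad = E(iv); C = P xor pad; the ciphertext block becomes the next IV.
 * Decryption regenerates the same pads from the received ciphertext,
 * which is why only the cipher's encrypt direction is ever needed. */
static void cfb_encrypt(block_encrypt_fn enc, const void *key,
			uint8_t iv[16], const uint8_t *src, uint8_t *dst,
			size_t len)
{
	uint8_t pad[16];
	size_t i, n;

	while (len) {
		n = len < 16 ? len : 16;
		enc(key, iv, pad);
		for (i = 0; i < n; i++)
			dst[i] = src[i] ^ pad[i];
		if (n == 16)
			memcpy(iv, dst, 16);	/* feedback; the final
						 * partial block feeds
						 * nothing further */
		src += n;
		dst += n;
		len -= n;
	}
}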
diff --git a/crypto/chacha_generic.c b/crypto/chacha.c
index 8beea79ab117..ec16d5a33f3c 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha.c
@@ -1,27 +1,61 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
+ * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers
*
* Copyright (C) 2015 Martin Willi
* Copyright (C) 2018 Google LLC
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
+#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static int chacha_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static int chacha20_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static int chacha12_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
+ const struct chacha_ctx *ctx,
+ const u8 iv[CHACHA_IV_SIZE])
{
struct skcipher_walk walk;
- u32 state[16];
+ struct chacha_state state;
int err;
err = skcipher_walk_virt(&walk, req, false);
- chacha_init_generic(state, ctx->key, iv);
+ chacha_init(&state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -29,8 +63,8 @@ static int chacha_stream_xor(struct skcipher_request *req,
if (nbytes < walk.total)
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
+ chacha_crypt(&state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
@@ -40,7 +74,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
static int crypto_chacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_stream_xor(req, ctx, req->iv);
}
@@ -48,14 +82,14 @@ static int crypto_chacha_crypt(struct skcipher_request *req)
static int crypto_xchacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
struct chacha_ctx subctx;
- u32 state[16];
+ struct chacha_state state;
u8 real_iv[16];
/* Compute the subkey given the original key and first 128 nonce bits */
- chacha_init_generic(state, ctx->key, req->iv);
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
+ chacha_init(&state, ctx->key, req->iv);
+ hchacha_block(&state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
/* Build the real IV */
@@ -69,8 +103,8 @@ static int crypto_xchacha_crypt(struct skcipher_request *req)
static struct skcipher_alg algs[] = {
{
.base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-generic",
- .base.cra_priority = 100,
+ .base.cra_driver_name = "chacha20-lib",
+ .base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
@@ -82,10 +116,11 @@ static struct skcipher_alg algs[] = {
.setkey = chacha20_setkey,
.encrypt = crypto_chacha_crypt,
.decrypt = crypto_chacha_crypt,
- }, {
+ },
+ {
.base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-generic",
- .base.cra_priority = 100,
+ .base.cra_driver_name = "xchacha20-lib",
+ .base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
@@ -97,10 +132,11 @@ static struct skcipher_alg algs[] = {
.setkey = chacha20_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
- }, {
+ },
+ {
.base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-generic",
- .base.cra_priority = 100,
+ .base.cra_driver_name = "xchacha12-lib",
+ .base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
@@ -115,25 +151,25 @@ static struct skcipher_alg algs[] = {
}
};
-static int __init chacha_generic_mod_init(void)
+static int __init crypto_chacha_mod_init(void)
{
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
-static void __exit chacha_generic_mod_fini(void)
+static void __exit crypto_chacha_mod_fini(void)
{
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(chacha_generic_mod_init);
-module_exit(chacha_generic_mod_fini);
+module_init(crypto_chacha_mod_init);
+module_exit(crypto_chacha_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
+MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers");
MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-generic");
+MODULE_ALIAS_CRYPTO("chacha20-lib");
MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha20-lib");
MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-generic");
+MODULE_ALIAS_CRYPTO("xchacha12-lib");
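
The setkey path added above only reinterprets the key bytes; a portable standalone equivalent of its get_unaligned_le32() loop:

#include <stdint.h>

/* Load a 32-byte ChaCha key as eight words, each read little-endian
 * irrespective of host byte order (what chacha_setkey() does above). */
static void chacha_load_key(uint32_t k[8], const uint8_t key[32])
{
	int i;

	for (i = 0; i < 8; i++)
		k[i] = (uint32_t)key[4 * i] |
		       ((uint32_t)key[4 * i + 1] << 8) |
		       ((uint32_t)key[4 * i + 2] << 16) |
		       ((uint32_t)key[4 * i + 3] << 24);
}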
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 97bbb135e9a6..b4b5a7198d84 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -12,34 +12,21 @@
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/string.h>
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
- struct crypto_ahash_spawn poly;
unsigned int saltlen;
};
struct chachapoly_ctx {
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
- u8 salt[];
-};
-
-struct poly_req {
- /* zero byte padding for AD/ciphertext, as needed */
- u8 pad[POLY1305_BLOCK_SIZE];
- /* tail data with AD/ciphertext lengths */
- struct {
- __le64 assoclen;
- __le64 cryptlen;
- } tail;
- struct scatterlist src[1];
- struct ahash_request req; /* must be last member */
+ u8 salt[] __counted_by(saltlen);
};
struct chacha_req {
@@ -62,7 +49,6 @@ struct chachapoly_req_ctx {
/* request flags, with MAY_SLEEP cleared if needed */
u32 flags;
union {
- struct poly_req poly;
struct chacha_req chacha;
} u;
};
@@ -105,19 +91,9 @@ static int poly_verify_tag(struct aead_request *req)
return 0;
}
-static int poly_copy_tag(struct aead_request *req)
+static void chacha_decrypt_done(void *data, int err)
{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- scatterwalk_map_and_copy(rctx->tag, req->dst,
- req->assoclen + rctx->cryptlen,
- sizeof(rctx->tag), 1);
- return 0;
-}
-
-static void chacha_decrypt_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_verify_tag);
+ async_done_continue(data, err, poly_verify_tag);
}
static int chacha_decrypt(struct aead_request *req)
@@ -151,210 +127,76 @@ skip:
return poly_verify_tag(req);
}
-static int poly_tail_continue(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- if (rctx->cryptlen == req->cryptlen) /* encrypting */
- return poly_copy_tag(req);
-
- return chacha_decrypt(req);
-}
-
-static void poly_tail_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_tail_continue);
-}
-
-static int poly_tail(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
- preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
- sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_tail_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src,
- rctx->tag, sizeof(preq->tail));
-
- err = crypto_ahash_finup(&preq->req);
- if (err)
- return err;
-
- return poly_tail_continue(req);
-}
-
-static void poly_cipherpad_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_tail);
-}
-
-static int poly_cipherpad(struct aead_request *req)
+static int poly_hash(struct aead_request *req)
{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
+ const void *zp = page_address(ZERO_PAGE(0));
+ struct scatterlist *sg = req->src;
+ struct poly1305_desc_ctx desc;
+ struct scatter_walk walk;
+ struct {
+ union {
+ struct {
+ __le64 assoclen;
+ __le64 cryptlen;
+ };
+ u8 u8[16];
+ };
+ } tail;
unsigned int padlen;
- int err;
-
- padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipherpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+ unsigned int total;
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_tail(req);
-}
-
-static void poly_cipher_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_cipherpad);
-}
-
-static int poly_cipher(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- struct scatterlist *crypt = req->src;
- int err;
+ if (sg != req->dst)
+ memcpy_sglist(req->dst, sg, req->assoclen);
if (rctx->cryptlen == req->cryptlen) /* encrypting */
- crypt = req->dst;
-
- crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipher_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
+ sg = req->dst;
- return poly_cipherpad(req);
-}
+ poly1305_init(&desc, rctx->key);
+ scatterwalk_start(&walk, sg);
-static void poly_adpad_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_cipher);
-}
+ total = rctx->assoclen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
-static int poly_adpad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- unsigned int padlen;
- int err;
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_adpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_cipher(req);
-}
-
-static void poly_ad_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_adpad);
-}
-
-static int poly_ad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_ad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_adpad(req);
-}
-
-static void poly_setkey_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_ad);
-}
+ poly1305_update(&desc, zp, padlen);
-static int poly_setkey(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ scatterwalk_skip(&walk, req->assoclen - rctx->assoclen);
- sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
+ total = rctx->cryptlen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_setkey_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_ad(req);
-}
-
-static void poly_init_done(struct crypto_async_request *areq, int err)
-{
- async_done_continue(areq->data, err, poly_setkey);
-}
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
-static int poly_init(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
+ poly1305_update(&desc, zp, padlen);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_init_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
+ tail.assoclen = cpu_to_le64(rctx->assoclen);
+ tail.cryptlen = cpu_to_le64(rctx->cryptlen);
+ poly1305_update(&desc, tail.u8, sizeof(tail));
+ memzero_explicit(&tail, sizeof(tail));
+ poly1305_final(&desc, rctx->tag);
- err = crypto_ahash_init(&preq->req);
- if (err)
- return err;
+ if (rctx->cryptlen != req->cryptlen)
+ return chacha_decrypt(req);
- return poly_setkey(req);
+ memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag));
+ return 0;
}
-static void poly_genkey_done(struct crypto_async_request *areq, int err)
+static void poly_genkey_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_init);
+ async_done_continue(data, err, poly_hash);
}
static int poly_genkey(struct aead_request *req)
@@ -388,12 +230,12 @@ static int poly_genkey(struct aead_request *req)
if (err)
return err;
- return poly_init(req);
+ return poly_hash(req);
}
-static void chacha_encrypt_done(struct crypto_async_request *areq, int err)
+static void chacha_encrypt_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_genkey);
+ async_done_continue(data, err, poly_genkey);
}
static int chacha_encrypt(struct aead_request *req)
@@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req)
/* encrypt call chain:
* - chacha_encrypt/done()
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
- * - poly_copy_tag()
+ * - poly_hash()
*/
return chacha_encrypt(req);
}
@@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req)
/* decrypt call chain:
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
+ * - poly_hash()
* - chacha_decrypt/done()
* - poly_verify_tag()
*/
@@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm)
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
unsigned long align;
- poly = crypto_spawn_ahash(&ictx->poly);
- if (IS_ERR(poly))
- return PTR_ERR(poly);
-
chacha = crypto_spawn_skcipher(&ictx->chacha);
- if (IS_ERR(chacha)) {
- crypto_free_ahash(poly);
+ if (IS_ERR(chacha))
return PTR_ERR(chacha);
- }
ctx->chacha = chacha;
- ctx->poly = poly;
ctx->saltlen = ictx->saltlen;
align = crypto_aead_alignmask(tfm);
@@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + offsetof(struct chachapoly_req_ctx, u) +
- max(offsetof(struct chacha_req, req) +
- sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(chacha),
- offsetof(struct poly_req, req) +
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(poly)));
+ offsetof(struct chacha_req, req) +
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(chacha));
return 0;
}
@@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_free_ahash(ctx->poly);
crypto_free_skcipher(ctx->chacha);
}
@@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst)
struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->chacha);
- crypto_drop_ahash(&ctx->poly);
kfree(inst);
}
@@ -558,8 +374,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
u32 mask;
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
- struct skcipher_alg *chacha;
- struct hash_alg_common *poly;
+ struct skcipher_alg_common *chacha;
int err;
if (ivsize > CHACHAPOLY_IV_SIZE)
@@ -579,19 +394,14 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
- chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
-
- err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[2]), 0, mask);
- if (err)
- goto err_free_inst;
- poly = crypto_spawn_ahash_alg(&ctx->poly);
+ chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha);
err = -EINVAL;
- if (poly->digestsize != POLY1305_DIGEST_SIZE)
+ if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") &&
+ strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic"))
goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
- if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
+ if (chacha->ivsize != CHACHA_IV_SIZE)
goto err_free_inst;
/* Not a stream cipher? */
if (chacha->base.cra_blocksize != 1)
@@ -599,23 +409,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_name,
- poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305)", name,
+ chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_driver_name,
- poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305-generic)", name,
+ chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_priority = (chacha->base.cra_priority +
- poly->base.cra_priority) / 2;
+ inst->alg.base.cra_priority = chacha->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
- poly->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = chacha->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
ctx->saltlen;
inst->alg.ivsize = ivsize;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
+ inst->alg.chunksize = chacha->chunksize;
inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
inst->alg.init = chachapoly_init;
inst->alg.exit = chachapoly_exit;
@@ -668,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void)
ARRAY_SIZE(rfc7539_tmpls));
}
-subsys_initcall(chacha20poly1305_module_init);
+module_init(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
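
The new poly_hash() walks scatterlists, but the MAC input it produces is just the RFC 8439 layout: AD, zero-padded to a 16-byte boundary, then ciphertext, zero-padded likewise, then both lengths as little-endian 64-bit words. A linear-buffer sketch; poly1305_update() stands in for the lib/crypto call used in the patch, and put_le64() is a local helper:

#include <stddef.h>
#include <stdint.h>

static const uint8_t zeropad[16];

static void put_le64(uint8_t *p, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

static void chachapoly_mac_data(struct poly1305_desc_ctx *mac,
				const uint8_t *ad, size_t adlen,
				const uint8_t *ct, size_t ctlen)
{
	uint8_t tail[16];

	poly1305_update(mac, ad, adlen);
	/* unsigned -x % 16 == (16 - x % 16) % 16, the same trick as
	 * "-rctx->assoclen % POLY1305_BLOCK_SIZE" in the patch */
	poly1305_update(mac, zeropad, -adlen % 16);
	poly1305_update(mac, ct, ctlen);
	poly1305_update(mac, zeropad, -ctlen % 16);
	put_le64(tail, adlen);
	put_le64(tail + 8, ctlen);
	poly1305_update(mac, tail, 16);
}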
diff --git a/crypto/cipher.c b/crypto/cipher.c
index b47141ed4a9f..1fe62bf79656 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kfree_sensitive(buffer);
return ret;
}
@@ -54,7 +53,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm,
return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
-EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL);
+EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL");
static inline void cipher_crypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src, bool enc)
@@ -82,11 +81,39 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
{
cipher_crypt_one(tfm, dst, src, true);
}
-EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL);
+EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, "CRYPTO_INTERNAL");
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
cipher_crypt_one(tfm, dst, src, false);
}
-EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL);
+EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, "CRYPTO_INTERNAL");
+
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
+{
+ struct crypto_tfm *tfm = crypto_cipher_tfm(cipher);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto_cipher *ncipher;
+ struct crypto_tfm *ntfm;
+
+ if (alg->cra_init)
+ return ERR_PTR(-ENOSYS);
+
+ if (unlikely(!crypto_mod_get(alg)))
+ return ERR_PTR(-ESTALE);
+
+ ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER,
+ CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC);
+ if (IS_ERR(ntfm)) {
+ crypto_mod_put(alg);
+ return ERR_CAST(ntfm);
+ }
+
+ ntfm->crt_flags = tfm->crt_flags;
+
+ ncipher = __crypto_cipher_cast(ntfm);
+
+ return ncipher;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_cipher);
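
A hedged usage sketch of the new clone helper; per the code above it refuses algorithms with a cra_init hook and allocates with GFP_ATOMIC, so it suits contexts where a fresh crypto_alloc_cipher() would be unsafe:

static int demo_clone(struct crypto_cipher *orig)
{
	struct crypto_cipher *clone;

	clone = crypto_clone_cipher(orig);
	if (IS_ERR(clone))
		return PTR_ERR(clone);

	/* clone is an independent transform sharing orig's algorithm
	 * and crt_flags; free it as usual when done */
	crypto_free_cipher(clone);
	return 0;
}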
diff --git a/crypto/cmac.c b/crypto/cmac.c
index f4a5d3bfb376..1b03964abe00 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -13,9 +13,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
/*
* +------------------------
@@ -28,33 +31,15 @@
*/
struct cmac_tfm_ctx {
struct crypto_cipher *child;
- u8 ctx[];
-};
-
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | cmac_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct cmac_desc_ctx {
- unsigned int len;
- u8 ctx[];
+ __be64 consts[];
};
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
unsigned int bs = crypto_shash_blocksize(parent);
- __be64 *consts = PTR_ALIGN((void *)ctx->ctx,
- (alignmask | (__alignof__(__be64) - 1)) + 1);
+ __be64 *consts = ctx->consts;
u64 _const[2];
int i, err = 0;
u8 msb_mask, gfmask;
@@ -104,14 +89,10 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
- unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -119,104 +100,75 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
+ u8 *prev = shash_desc_ctx(pdesc);
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
-
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *consts = PTR_ALIGN((void *)tctx->ctx,
- (alignmask | (__alignof__(__be64) - 1)) + 1);
- u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len - 1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
+ crypto_xor(prev, (const u8 *)tctx->consts + offset, bs);
+ crypto_cipher_encrypt_one(tfm, out, prev);
+ return 0;
+}
- crypto_xor(prev, odds, bs);
- crypto_xor(prev, consts + offset, bs);
+static int cmac_init_tfm(struct crypto_shash *tfm)
+{
+ struct shash_instance *inst = shash_alg_instance(tfm);
+ struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
+ struct crypto_cipher_spawn *spawn;
+ struct crypto_cipher *cipher;
- crypto_cipher_encrypt_one(tfm, out, prev);
+ spawn = shash_instance_ctx(inst);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
return 0;
}
-static int cmac_init_tfm(struct crypto_tfm *tfm)
+static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm)
{
+ struct cmac_tfm_ctx *octx = crypto_shash_ctx(otfm);
+ struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
- struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- cipher = crypto_spawn_cipher(spawn);
+ cipher = crypto_clone_cipher(octx->child);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
-};
+}
-static void cmac_exit_tfm(struct crypto_tfm *tfm)
+static void cmac_exit_tfm(struct crypto_shash *tfm)
{
- struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
crypto_free_cipher(ctx->child);
}
@@ -225,7 +177,6 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
- unsigned long alignmask;
u32 mask;
int err;
@@ -257,30 +208,22 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alignmask = alg->cra_alignmask;
- inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) +
+ alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize =
- ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment())
- + (alignmask & ~(crypto_tfm_ctx_alignment() - 1))
- + alg->cra_blocksize * 2;
-
- inst->alg.base.cra_ctxsize =
- ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
- + ((alignmask | (__alignof__(__be64) - 1)) &
- ~(crypto_tfm_ctx_alignment() - 1))
- + alg->cra_blocksize * 2;
-
- inst->alg.base.cra_init = cmac_init_tfm;
- inst->alg.base.cra_exit = cmac_exit_tfm;
-
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
- inst->alg.final = crypto_cmac_digest_final;
+ inst->alg.finup = crypto_cmac_digest_finup;
inst->alg.setkey = crypto_cmac_digest_setkey;
+ inst->alg.init_tfm = cmac_init_tfm;
+ inst->alg.clone_tfm = cmac_clone_tfm;
+ inst->alg.exit_tfm = cmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
@@ -308,10 +251,10 @@ static void __exit crypto_cmac_module_exit(void)
crypto_unregister_template(&crypto_cmac_tmpl);
}
-subsys_initcall(crypto_cmac_module_init);
+module_init(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("cmac");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
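
The rewritten finup above compresses the classic CMAC tail handling into three lines; spelled out for a 16-byte block, where k1/k2 are the derived subkeys the setkey path stores in tctx->consts and E() is one block cipher call:

#include <stddef.h>
#include <stdint.h>

static void cmac_final_block(uint8_t prev[16], const uint8_t *src,
			     size_t len, const uint8_t k1[16],
			     const uint8_t k2[16])
{
	const uint8_t *subkey = (len == 16) ? k1 : k2;
	size_t i;

	for (i = 0; i < len; i++)	/* fold in the last (partial) block */
		prev[i] ^= src[i];
	if (len != 16)
		prev[len] ^= 0x80;	/* 10...0 padding; the trailing
					 * zeros are implicit, since XOR
					 * with 0 leaves the state as-is */
	for (i = 0; i < 16; i++)
		prev[i] ^= subkey[i];
	/* mac = E(key, prev): crypto_cipher_encrypt_one() in the patch */
}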
diff --git a/crypto/compress.c b/crypto/compress.c
deleted file mode 100644
index 9048fe390c46..000000000000
--- a/crypto/compress.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * Compression operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- */
-#include <linux/crypto.h>
-#include "internal.h"
-
-int crypto_comp_compress(struct crypto_comp *comp,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
- return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
- dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_compress);
-
-int crypto_comp_decompress(struct crypto_comp *comp,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
- return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
- dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_decompress);
diff --git a/crypto/compress.h b/crypto/compress.h
new file mode 100644
index 000000000000..f7737a1fcbbd
--- /dev/null
+++ b/crypto/compress.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_COMPRESS_H
+#define _LOCAL_CRYPTO_COMPRESS_H
+
+#include "internal.h"
+
+struct acomp_req;
+struct comp_alg_common;
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+
+void comp_prepare_alg(struct comp_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/crc32_generic.c b/crypto/crc32.c
index a989cb44fd16..489cbed9422e 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32.c
@@ -7,7 +7,7 @@
* This is crypto api shash wrappers to crc32_le.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
@@ -64,8 +64,7 @@ static int crc32_update(struct shash_desc *desc, const u8 *data,
}
/* No final XOR 0xFFFFFFFF, like crc32_le */
-static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(crc32_le(*crcp, data, len), out);
return 0;
@@ -88,28 +87,27 @@ static int crc32_final(struct shash_desc *desc, u8 *out)
static int crc32_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
+ return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, out);
}
+
static struct shash_alg alg = {
- .setkey = crc32_setkey,
- .init = crc32_init,
- .update = crc32_update,
- .final = crc32_final,
- .finup = crc32_finup,
- .digest = crc32_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = crc32_cra_init,
- }
+ .setkey = crc32_setkey,
+ .init = crc32_init,
+ .update = crc32_update,
+ .final = crc32_final,
+ .finup = crc32_finup,
+ .digest = crc32_digest,
+ .descsize = sizeof(u32),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+
+ .base.cra_name = "crc32",
+ .base.cra_driver_name = "crc32-lib",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(u32),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_init = crc32_cra_init,
};
static int __init crc32_mod_init(void)
@@ -122,11 +120,10 @@ static void __exit crc32_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(crc32_mod_init);
+module_init(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32");
-MODULE_ALIAS_CRYPTO("crc32-generic");
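
The "No final XOR 0xFFFFFFFF" note above is the whole contract of this wrapper; a bitwise reference of what it returns (reflected polynomial 0xedb88320, caller's seed used verbatim):

#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320 : 0);
	}
	return crc;	/* deliberately no ~crc on output */
}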
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c
index 768614738541..1eff54dde2f7 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c.c
@@ -30,7 +30,7 @@
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = __crc32c_le(ctx->crc, data, length);
+ ctx->crc = crc32c(ctx->crc, data, length);
return 0;
}
@@ -99,7 +99,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c(*crcp, data, len), out);
return 0;
}
@@ -128,24 +128,23 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
}
static struct shash_alg alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crc32c_cra_init,
- }
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksum_update,
+ .final = chksum_final,
+ .finup = chksum_finup,
+ .digest = chksum_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+
+ .base.cra_name = "crc32c",
+ .base.cra_driver_name = "crc32c-lib",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct chksum_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_init = crc32c_cra_init,
};
static int __init crc32c_mod_init(void)
@@ -158,11 +157,10 @@ static void __exit crc32c_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(crc32c_mod_init);
+module_init(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
-MODULE_ALIAS_CRYPTO("crc32c-generic");
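
crc32c differs from the crc32 wrapper above in polynomial and in output conditioning: the digest path complements the running value, per the ~crc32c(...) calls in the hunks above. Reference sketch, assuming the usual ~0 default seed:

#include <stddef.h>
#include <stdint.h>

static uint32_t crc32c_ref(uint32_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? 0x82f63b78 : 0);
	}
	return crc;
}

/* what chksum_final()/__chksum_finup() emit (stored little-endian):
 *	digest = ~crc32c_ref(~0u, data, len);
 */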
diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c
deleted file mode 100644
index 9e812bb26dba..000000000000
--- a/crypto/crc64_rocksoft_generic.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/crc64.h>
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-#include <asm/unaligned.h>
-
-static int chksum_init(struct shash_desc *desc)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = crc64_rocksoft_generic(*crc, data, length);
-
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le64(*crc, out);
- return 0;
-}
-
-static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out)
-{
- crc = crc64_rocksoft_generic(crc, data, len);
- put_unaligned_le64(crc, out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- return __chksum_finup(*crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = sizeof(u64),
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(u64),
- .base = {
- .cra_name = CRC64_ROCKSOFT_STRING,
- .cra_driver_name = "crc64-rocksoft-generic",
- .cra_priority = 200,
- .cra_blocksize = 1,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init crc64_rocksoft_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc64_rocksoft_exit(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc64_rocksoft_init);
-module_exit(crc64_rocksoft_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Rocksoft model CRC64 calculation.");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic");
diff --git a/crypto/crct10dif_common.c b/crypto/crct10dif_common.c
deleted file mode 100644
index b2fab366f518..000000000000
--- a/crypto/crct10dif_common.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform
- *
- * Copyright (c) 2007 Oracle Corporation. All rights reserved.
- * Written by Martin K. Petersen <martin.petersen@oracle.com>
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/crc-t10dif.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-/* Table generated using the following polynomial:
- * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
- * gt: 0x8bb7
- */
-static const __u16 t10_dif_crc_table[256] = {
- 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
- 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
- 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
- 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
- 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
- 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
- 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
- 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
- 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
- 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
- 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
- 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
- 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
- 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
- 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
- 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
- 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
- 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
- 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
- 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
- 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
- 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
- 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
- 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
- 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
- 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
- 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
- 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
- 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
- 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
- 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
- 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
-};
-
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
-{
- unsigned int i;
-
- for (i = 0; i < len; i++)
- crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
-
- return crc;
-}
-EXPORT_SYMBOL(crc_t10dif_generic);
-
-MODULE_DESCRIPTION("T10 DIF CRC calculation common code");
-MODULE_LICENSE("GPL");
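
The 256-entry table removed above is mechanical to regenerate from the stated polynomial 0x8bb7 (MSB-first, non-reflected); a generator sketch, not kernel code:

#include <stdint.h>

static void t10dif_gen_table(uint16_t table[256])
{
	uint32_t crc;
	int i, j;

	for (i = 0; i < 256; i++) {
		crc = (uint32_t)i << 8;
		for (j = 0; j < 8; j++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8bb7 : 0);
		table[i] = (uint16_t)crc;	/* e.g. table[1] == 0x8bb7 */
	}
}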
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
deleted file mode 100644
index e843982073bb..000000000000
--- a/crypto/crct10dif_generic.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform
- *
- * Copyright (c) 2007 Oracle Corporation. All rights reserved.
- * Written by Martin K. Petersen <martin.petersen@oracle.com>
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-struct chksum_desc_ctx {
- __u16 crc;
-};
-
-/*
- * Steps through the buffer one byte at a time, computing the CRC
- * MSB-first (non-reflected) via the table.
- */
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__u16 *)out = ctx->crc;
- return 0;
-}
-
-static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
-{
- *(__u16 *)out = crc_t10dif_generic(crc, data, len);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crct10dif",
- .cra_driver_name = "crct10dif-generic",
- .cra_priority = 100,
- .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init crct10dif_mod_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit crct10dif_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-subsys_initcall(crct10dif_mod_init);
-module_exit(crct10dif_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation.");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 668095eca0fa..cd38f4676176 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
static struct workqueue_struct *cryptd_wq;
struct cryptd_cpu_queue {
+ local_lock_t bh_lock;
struct crypto_queue queue;
struct work_struct work;
};
@@ -68,11 +69,11 @@ struct aead_instance_ctx {
struct cryptd_skcipher_ctx {
refcount_t refcnt;
- struct crypto_sync_skcipher *child;
+ struct crypto_skcipher *child;
};
struct cryptd_skcipher_request_ctx {
- crypto_completion_t complete;
+ struct skcipher_request req;
};
struct cryptd_hash_ctx {
@@ -82,6 +83,7 @@ struct cryptd_hash_ctx {
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ void *data;
struct shash_desc desc;
};
@@ -91,7 +93,7 @@ struct cryptd_aead_ctx {
};
struct cryptd_aead_request_ctx {
- crypto_completion_t complete;
+ struct aead_request req;
};
static void cryptd_queue_worker(struct work_struct *work);
@@ -109,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ local_lock_init(&cpu_queue->bh_lock);
}
pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
@@ -134,6 +137,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
refcount_t *refcnt;
local_bh_disable();
+ local_lock_nested_bh(&queue->cpu_queue->bh_lock);
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
@@ -150,6 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
refcount_inc(refcnt);
out:
+ local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
local_bh_enable();
return err;
@@ -168,16 +173,18 @@ static void cryptd_queue_worker(struct work_struct *work)
* Only handle one request at a time to avoid hogging crypto workqueue.
*/
local_bh_disable();
+ __local_lock_nested_bh(&cpu_queue->bh_lock);
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
+ __local_unlock_nested_bh(&cpu_queue->bh_lock);
local_bh_enable();
if (!req)
return;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
+ crypto_request_complete(backlog, -EINPROGRESS);
+ crypto_request_complete(req, 0);
if (cpu_queue->queue.qlen)
queue_work(cryptd_wq, &cpu_queue->work);
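/*
 * Shape of the locking the two hunks above introduce, as a standalone
 * sketch (PREEMPT_RT-friendly: local_bh_disable() pins the CPU and
 * blocks softirqs, while the nested local_lock names the per-CPU data
 * actually protected).  Mirrors the calls used in this patch; the
 * demo_* names are hypothetical:
 *
 *	struct demo_cpu_queue {
 *		local_lock_t lock;
 *		struct crypto_queue queue;
 *	};
 *
 *	static int demo_enqueue(struct demo_cpu_queue __percpu *pcpu,
 *				struct crypto_async_request *req)
 *	{
 *		struct demo_cpu_queue *q;
 *		int err;
 *
 *		local_bh_disable();
 *		local_lock_nested_bh(&pcpu->lock);
 *		q = this_cpu_ptr(pcpu);
 *		err = crypto_enqueue_request(&q->queue, req);
 *		local_unlock_nested_bh(&pcpu->lock);
 *		local_bh_enable();
 *		return err;
 *	}
 */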
@@ -227,84 +234,85 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_sync_skcipher *child = ctx->child;
+ struct crypto_skcipher *child = ctx->child;
- crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(child,
- crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child,
+ crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ return crypto_skcipher_setkey(child, key, keylen);
}
-static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+static struct skcipher_request *cryptd_skcipher_prepare(
+ struct skcipher_request *req, int err)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
- int refcnt = refcount_read(&ctx->refcnt);
-
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
- crypto_free_skcipher(tfm);
-}
+ struct skcipher_request *subreq = &rctx->req;
+ struct cryptd_skcipher_ctx *ctx;
+ struct crypto_skcipher *child;
-static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
- int err)
-{
- struct skcipher_request *req = skcipher_request_cast(base);
- struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *child = ctx->child;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
+ req->base.complete = subreq->base.complete;
+ req->base.data = subreq->base.data;
if (unlikely(err == -EINPROGRESS))
- goto out;
+ return NULL;
+
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ child = ctx->child;
- skcipher_request_set_sync_tfm(subreq, child);
+ skcipher_request_set_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
-
- req->base.complete = rctx->complete;
-
-out:
- cryptd_skcipher_complete(req, err);
+ return subreq;
}
-static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
- int err)
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
+ crypto_completion_t complete)
{
- struct skcipher_request *req = skcipher_request_cast(base);
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *child = ctx->child;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
+ struct skcipher_request *subreq = &rctx->req;
+ int refcnt = refcount_read(&ctx->refcnt);
- if (unlikely(err == -EINPROGRESS))
- goto out;
+ local_bh_disable();
+ skcipher_request_complete(req, err);
+ local_bh_enable();
- skcipher_request_set_sync_tfm(subreq, child);
- skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
+ if (unlikely(err == -EINPROGRESS)) {
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
+ req->base.complete = complete;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(tfm);
+}
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+static void cryptd_skcipher_encrypt(void *data, int err)
+{
+ struct skcipher_request *req = data;
+ struct skcipher_request *subreq;
- req->base.complete = rctx->complete;
+ subreq = cryptd_skcipher_prepare(req, err);
+ if (likely(subreq))
+ err = crypto_skcipher_encrypt(subreq);
-out:
- cryptd_skcipher_complete(req, err);
+ cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
+}
+
+static void cryptd_skcipher_decrypt(void *data, int err)
+{
+ struct skcipher_request *req = data;
+ struct skcipher_request *subreq;
+
+ subreq = cryptd_skcipher_prepare(req, err);
+ if (likely(subreq))
+ err = crypto_skcipher_decrypt(subreq);
+
+ cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
@@ -312,11 +320,14 @@ static int cryptd_skcipher_enqueue(struct skcipher_request *req,
{
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_request *subreq = &rctx->req;
struct cryptd_queue *queue;
queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
- rctx->complete = req->base.complete;
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
req->base.complete = compl;
+ req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
@@ -343,9 +354,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = (struct crypto_sync_skcipher *)cipher;
+ ctx->child = cipher;
crypto_skcipher_set_reqsize(
- tfm, sizeof(struct cryptd_skcipher_request_ctx));
+ tfm, sizeof(struct cryptd_skcipher_request_ctx) +
+ crypto_skcipher_reqsize(cipher));
return 0;
}
@@ -353,7 +365,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->child);
+ crypto_free_skcipher(ctx->child);
}
static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ -371,7 +383,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
{
struct skcipherd_instance_ctx *ctx;
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
+ struct skcipher_alg_common *alg;
u32 type;
u32 mask;
int err;
@@ -390,17 +402,17 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- alg = crypto_spawn_skcipher_alg(&ctx->spawn);
+ alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+ inst->alg.ivsize = alg->ivsize;
+ inst->alg.chunksize = alg->chunksize;
+ inst->alg.min_keysize = alg->min_keysize;
+ inst->alg.max_keysize = alg->max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
@@ -421,12 +433,12 @@ err_free_inst:
return err;
}
-static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct ahash_instance *inst = ahash_alg_instance(tfm);
+ struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
struct crypto_shash_spawn *spawn = &ictx->spawn;
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_spawn_shash(spawn);
@@ -434,15 +446,30 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
return PTR_ERR(hash);
ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct cryptd_hash_request_ctx) +
crypto_shash_descsize(hash));
return 0;
}
-static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
+ struct crypto_ahash *tfm)
+{
+ struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_shash *hash;
+
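+ /* Give the clone its own child shash so no state is shared. */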
+ hash = crypto_clone_shash(ctx->child);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ nctx->child = hash;
+ return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
crypto_free_shash(ctx->child);
}
@@ -468,45 +495,63 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
cryptd_get_queue(crypto_ahash_tfm(tfm));
rctx->complete = req->base.complete;
+ rctx->data = req->base.data;
req->base.complete = compl;
+ req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
-static void cryptd_hash_complete(struct ahash_request *req, int err)
+static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
+ int err)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
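+ /* Restore the caller's completion saved at enqueue time. */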
+ req->base.complete = rctx->complete;
+ req->base.data = rctx->data;
+
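+ /* -EINPROGRESS is only the backlog notification; nothing to do yet. */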
+ if (unlikely(err == -EINPROGRESS))
+ return NULL;
+
+ return &rctx->desc;
+}
+
+static void cryptd_hash_complete(struct ahash_request *req, int err,
+ crypto_completion_t complete)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
int refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
- rctx->complete(&req->base, err);
+ ahash_request_complete(req, err);
local_bh_enable();
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
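+ /* On a backlog notification, re-arm cryptd's handler so the final
+  * completion also passes through here.
+  */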
+ if (err == -EINPROGRESS) {
+ req->base.complete = complete;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_ahash(tfm);
}
-static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_init(void *data, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct ahash_request *req = data;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
+ struct shash_desc *desc;
- if (unlikely(err == -EINPROGRESS))
+ desc = cryptd_hash_prepare(req, err);
+ if (unlikely(!desc))
goto out;
desc->tfm = child;
err = crypto_shash_init(desc);
- req->base.complete = rctx->complete;
-
out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_init);
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -514,22 +559,16 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_init);
}
-static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_update(void *data, int err)
{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
-
- rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- err = shash_ahash_update(req, &rctx->desc);
+ struct ahash_request *req = data;
+ struct shash_desc *desc;
- req->base.complete = rctx->complete;
+ desc = cryptd_hash_prepare(req, err);
+ if (likely(desc))
+ err = shash_ahash_update(req, desc);
-out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_update);
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -537,20 +576,16 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_update);
}
-static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_final(void *data, int err)
{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct ahash_request *req = data;
+ struct shash_desc *desc;
- if (unlikely(err == -EINPROGRESS))
- goto out;
+ desc = cryptd_hash_prepare(req, err);
+ if (likely(desc))
+ err = crypto_shash_final(desc, req->result);
- err = crypto_shash_final(&rctx->desc, req->result);
-
- req->base.complete = rctx->complete;
-
-out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_final);
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -558,20 +593,16 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
-static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_finup(void *data, int err)
{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- err = shash_ahash_finup(req, &rctx->desc);
+ struct ahash_request *req = data;
+ struct shash_desc *desc;
- req->base.complete = rctx->complete;
+ desc = cryptd_hash_prepare(req, err);
+ if (likely(desc))
+ err = shash_ahash_finup(req, desc);
-out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_finup);
}
static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -579,25 +610,24 @@ static int cryptd_hash_finup_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
-static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_digest(void *data, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct ahash_request *req = data;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
+ struct shash_desc *desc;
- if (unlikely(err == -EINPROGRESS))
+ desc = cryptd_hash_prepare(req, err);
+ if (unlikely(!desc))
goto out;
desc->tfm = child;
err = shash_ahash_digest(req, desc);
- req->base.complete = rctx->complete;
-
out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_digest);
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -668,8 +698,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
- inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+ inst->alg.init_tfm = cryptd_hash_init_tfm;
+ inst->alg.clone_tfm = cryptd_hash_clone_tfm;
+ inst->alg.exit_tfm = cryptd_hash_exit_tfm;
inst->alg.init = cryptd_hash_init_enqueue;
inst->alg.update = cryptd_hash_update_enqueue;
@@ -710,56 +741,74 @@ static int cryptd_aead_setauthsize(struct crypto_aead *parent,
}
static void cryptd_aead_crypt(struct aead_request *req,
- struct crypto_aead *child,
- int err,
- int (*crypt)(struct aead_request *req))
+ struct crypto_aead *child, int err,
+ int (*crypt)(struct aead_request *req),
+ crypto_completion_t compl)
{
struct cryptd_aead_request_ctx *rctx;
+ struct aead_request *subreq;
struct cryptd_aead_ctx *ctx;
- crypto_completion_t compl;
struct crypto_aead *tfm;
int refcnt;
rctx = aead_request_ctx(req);
- compl = rctx->complete;
+ subreq = &rctx->req;
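+ /* Restore the caller's completion saved at enqueue time. */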
+ req->base.complete = subreq->base.complete;
+ req->base.data = subreq->base.data;
tfm = crypto_aead_reqtfm(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- aead_request_set_tfm(req, child);
- err = crypt( req );
+
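+ /* Run the operation on the child AEAD via the subrequest embedded
+  * in the request context.
+  */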
+ aead_request_set_tfm(subreq, child);
+ aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ err = crypt(subreq);
out:
ctx = crypto_aead_ctx(tfm);
refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
- compl(&req->base, err);
+ aead_request_complete(req, err);
local_bh_enable();
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
+ if (err == -EINPROGRESS) {
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
+ req->base.complete = compl;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_aead(tfm);
}
-static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+static void cryptd_aead_encrypt(void *data, int err)
{
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
- struct crypto_aead *child = ctx->child;
- struct aead_request *req;
+ struct aead_request *req = data;
+ struct cryptd_aead_ctx *ctx;
+ struct crypto_aead *child;
- req = container_of(areq, struct aead_request, base);
- cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
+ ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ child = ctx->child;
+ cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
+ cryptd_aead_encrypt);
}
-static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+static void cryptd_aead_decrypt(void *data, int err)
{
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
- struct crypto_aead *child = ctx->child;
- struct aead_request *req;
+ struct aead_request *req = data;
+ struct cryptd_aead_ctx *ctx;
+ struct crypto_aead *child;
- req = container_of(areq, struct aead_request, base);
- cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
+ ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ child = ctx->child;
+ cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
+ cryptd_aead_decrypt);
}
static int cryptd_aead_enqueue(struct aead_request *req,
@@ -768,9 +817,12 @@ static int cryptd_aead_enqueue(struct aead_request *req,
struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+ struct aead_request *subreq = &rctx->req;
- rctx->complete = req->base.complete;
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
req->base.complete = compl;
+ req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
@@ -798,8 +850,8 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
ctx->child = cipher;
crypto_aead_set_reqsize(
- tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
- crypto_aead_reqsize(cipher)));
+ tfm, sizeof(struct cryptd_aead_request_ctx) +
+ crypto_aead_reqsize(cipher));
return 0;
}
@@ -883,7 +935,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_SKCIPHER:
+ case CRYPTO_ALG_TYPE_LSKCIPHER:
return cryptd_create_skcipher(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_HASH:
return cryptd_create_hash(tmpl, tb, algt, &queue);
@@ -931,7 +983,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
- return &ctx->child->base;
+ return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
@@ -1063,7 +1115,8 @@ static int __init cryptd_init(void)
{
int err;
- cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
+ cryptd_wq = alloc_workqueue("cryptd",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE | WQ_PERCPU,
1);
if (!cryptd_wq)
return -ENOMEM;
@@ -1092,7 +1145,7 @@ static void __exit cryptd_exit(void)
crypto_unregister_template(&cryptd_tmpl);
}
-subsys_initcall(cryptd_init);
+module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index bb8e77077f02..18e1689efe12 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -7,15 +7,27 @@
* Author: Baolin Wang <baolin.wang@linaro.org>
*/
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <crypto/engine.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
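+/* Pairs an algorithm with the engine op used to execute its requests. */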
+struct crypto_engine_alg {
+ struct crypto_alg base;
+ struct crypto_engine_op op;
+};
+
/**
* crypto_finalize_request - finalize one request if the request is done
* @engine: the hardware engine
@@ -26,9 +38,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
{
unsigned long flags;
- bool finalize_req = false;
- int ret;
- struct crypto_engine_ctx *enginectx;
/*
* If hardware cannot enqueue more requests
@@ -38,23 +47,13 @@ static void crypto_finalize_request(struct crypto_engine *engine,
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
- finalize_req = true;
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
- if (finalize_req || engine->retry_support) {
- enginectx = crypto_tfm_ctx(req->tfm);
- if (enginectx->op.prepare_request &&
- enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- }
lockdep_assert_in_softirq();
- req->complete(req, err);
+ crypto_request_complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
}
@@ -72,10 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
+ struct crypto_engine_alg *alg;
+ struct crypto_engine_op *op;
unsigned long flags;
- bool was_busy = false;
int ret;
- struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -83,12 +82,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!engine->retry_support && engine->cur_req)
goto out;
- /* If another context is idling then defer */
- if (engine->idling) {
- kthread_queue_work(engine->kworker, &engine->pump_requests);
- goto out;
- }
-
/* Check if the engine queue is idle */
if (!crypto_queue_len(&engine->queue) || !engine->running) {
if (!engine->busy)
@@ -102,15 +95,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
engine->busy = false;
- engine->idling = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
-
- if (engine->unprepare_crypt_hardware &&
- engine->unprepare_crypt_hardware(engine))
- dev_err(engine->dev, "failed to unprepare crypt hardware\n");
-
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->idling = false;
goto out;
}
@@ -129,42 +113,15 @@ start_request:
if (!engine->retry_support)
engine->cur_req = async_req;
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- if (engine->busy)
- was_busy = true;
- else
+ if (!engine->busy)
engine->busy = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- /* Until here we get the request need to be encrypted successfully */
- if (!was_busy && engine->prepare_crypt_hardware) {
- ret = engine->prepare_crypt_hardware(engine);
- if (ret) {
- dev_err(engine->dev, "failed to prepare crypt hardware\n");
- goto req_err_2;
- }
- }
-
- enginectx = crypto_tfm_ctx(async_req->tfm);
-
- if (enginectx->op.prepare_request) {
- ret = enginectx->op.prepare_request(engine, async_req);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err_2;
- }
- }
- if (!enginectx->op.do_one_request) {
- dev_err(engine->dev, "failed to do request\n");
- ret = -EINVAL;
- goto req_err_1;
- }
-
- ret = enginectx->op.do_one_request(engine, async_req);
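+ /* The engine op now comes from the algorithm itself; the old
+  * per-request prepare/unprepare hooks are gone.
+  */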
+ alg = container_of(async_req->tfm->__crt_alg,
+ struct crypto_engine_alg, base);
+ op = &alg->op;
+ ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
if (ret < 0) {
@@ -180,18 +137,6 @@ start_request:
ret);
goto req_err_1;
}
- /*
- * If retry mechanism is supported,
- * unprepare current request and
- * enqueue it back into crypto-engine queue.
- */
- if (enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine,
- async_req);
- if (ret)
- dev_err(engine->dev,
- "failed to unprepare request\n");
- }
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
@@ -207,16 +152,12 @@ start_request:
goto retry;
req_err_1:
- if (enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine, async_req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
-
-req_err_2:
- async_req->complete(async_req, ret);
+ crypto_request_complete(async_req, ret);
retry:
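+ /* Complete the backlogged request only once the current request has
+  * been dealt with.
+  */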
+ if (backlog)
+ crypto_request_complete(backlog, -EINPROGRESS);
+
/* If retry mechanism is supported, send new requests to engine */
if (engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -227,17 +168,6 @@ retry:
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
- /*
- * Batch requests is possible only if
- * hardware can enqueue multiple requests
- */
- if (engine->do_batch_requests) {
- ret = engine->do_batch_requests(engine);
- if (ret)
- dev_err(engine->dev, "failed to do batch requests: %d\n",
- ret);
- }
-
return;
}
@@ -494,12 +424,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
* crypto-engine queue.
* @dev: the device attached to the hardware engine
* @retry_support: whether the hardware supports the retry mechanism
- * @cbk_do_batch: pointer to a callback function to be invoked when executing
- * a batch of requests.
- * This has the form:
- * callback(struct crypto_engine *engine)
- * where:
- * @engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
@@ -508,7 +432,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
*/
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen)
{
struct crypto_engine *engine;
@@ -524,14 +447,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
engine->rt = rt;
engine->running = false;
engine->busy = false;
- engine->idling = false;
engine->retry_support = retry_support;
engine->priv_data = dev;
- /*
- * Batch requests is possible only if
- * hardware has support for retry mechanism.
- */
- engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
@@ -539,7 +456,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
- engine->kworker = kthread_create_worker(0, "%s", engine->name);
+ engine->kworker = kthread_run_worker(0, "%s", engine->name);
if (IS_ERR(engine->kworker)) {
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
@@ -566,7 +483,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
- return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+ return crypto_engine_alloc_init_and_set(dev, false, rt,
CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
@@ -574,22 +491,175 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
* crypto_engine_exit - free the resources of the hardware engine on exit
* @engine: the hardware engine that needs to be freed
- *
- * Return 0 for success.
*/
-int crypto_engine_exit(struct crypto_engine *engine)
+void crypto_engine_exit(struct crypto_engine *engine)
{
int ret;
ret = crypto_engine_stop(engine);
if (ret)
- return ret;
+ return;
kthread_destroy_worker(engine->kworker);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_exit);
+
+int crypto_engine_register_aead(struct aead_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+ return crypto_register_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
+
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
+{
+ crypto_unregister_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
+
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_aead(&algs[i]);
+ if (ret)
+ goto err;
+ }
return 0;
+
+err:
+ crypto_engine_unregister_aeads(algs, i);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(crypto_engine_exit);
+EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
+
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_aead(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+ return crypto_register_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
+
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
+{
+ crypto_unregister_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
+
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_ahash(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_ahashes(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
+
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+ return crypto_register_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
+
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
+{
+ crypto_unregister_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+ return crypto_register_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
+
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
+{
+ crypto_unregister_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+ return crypto_register_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
+
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
+{
+ crypto_unregister_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
+
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_skcipher(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_skciphers(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
+
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 5b84b0f7cc17..34588f39fdfc 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -15,25 +15,11 @@
#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mm.h>
#include <linux/string.h>
-static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_sync_skcipher *crypto_default_null_skcipher;
-static int crypto_default_null_skcipher_refcnt;
-
-static int null_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- if (slen > *dlen)
- return -EINVAL;
- memcpy(dst, src, slen);
- *dlen = slen;
- return 0;
-}
-
static int null_init(struct shash_desc *desc)
{
return 0;
@@ -75,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
static int null_skcipher_crypt(struct skcipher_request *req)
{
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr != walk.dst.virt.addr)
- memcpy(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
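+ /* No skcipher walk needed: a plain scatterlist copy is enough. */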
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src, req->cryptlen);
+ return 0;
}
static struct shash_alg digest_null = {
@@ -121,7 +97,7 @@ static struct skcipher_alg skcipher_null = {
.decrypt = null_skcipher_crypt,
};
-static struct crypto_alg null_algs[] = { {
+static struct crypto_alg cipher_null = {
.cra_name = "cipher_null",
.cra_driver_name = "cipher_null-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
@@ -134,62 +110,16 @@ static struct crypto_alg null_algs[] = { {
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
-}, {
- .cra_name = "compress_null",
- .cra_driver_name = "compress_null-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_blocksize = NULL_BLOCK_SIZE,
- .cra_ctxsize = 0,
- .cra_module = THIS_MODULE,
- .cra_u = { .compress = {
- .coa_compress = null_compress,
- .coa_decompress = null_compress } }
-} };
+};
-MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
-{
- struct crypto_sync_skcipher *tfm;
-
- mutex_lock(&crypto_default_null_skcipher_lock);
- tfm = crypto_default_null_skcipher;
-
- if (!tfm) {
- tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
- if (IS_ERR(tfm))
- goto unlock;
-
- crypto_default_null_skcipher = tfm;
- }
-
- crypto_default_null_skcipher_refcnt++;
-
-unlock:
- mutex_unlock(&crypto_default_null_skcipher_lock);
-
- return tfm;
-}
-EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
-
-void crypto_put_default_null_skcipher(void)
-{
- mutex_lock(&crypto_default_null_skcipher_lock);
- if (!--crypto_default_null_skcipher_refcnt) {
- crypto_free_sync_skcipher(crypto_default_null_skcipher);
- crypto_default_null_skcipher = NULL;
- }
- mutex_unlock(&crypto_default_null_skcipher_lock);
-}
-EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
-
static int __init crypto_null_mod_init(void)
{
int ret = 0;
- ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs));
+ ret = crypto_register_alg(&cipher_null);
if (ret < 0)
goto out;
@@ -206,19 +136,19 @@ static int __init crypto_null_mod_init(void)
out_unregister_shash:
crypto_unregister_shash(&digest_null);
out_unregister_algs:
- crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_alg(&cipher_null);
out:
return ret;
}
static void __exit crypto_null_mod_fini(void)
{
- crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_alg(&cipher_null);
crypto_unregister_shash(&digest_null);
crypto_unregister_skcipher(&skcipher_null);
}
-subsys_initcall(crypto_null_mod_init);
+module_init(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user.c
index 3fa20f12989f..aad429bef03e 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user.c
@@ -18,7 +18,6 @@
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
-#include <crypto/internal/cryptouser.h>
#include "internal.h"
@@ -33,7 +32,7 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
@@ -85,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
sizeof(rcipher), &rcipher);
}
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_comp rcomp;
-
- memset(&rcomp, 0, sizeof(rcomp));
-
- strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
- return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
@@ -137,11 +125,6 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- if (crypto_report_comp(skb, alg))
- goto nla_put_failure;
-
- break;
}
out:
@@ -387,6 +370,13 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
return crypto_del_default_rng();
}
+static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ struct nlattr **attrs)
+{
+ /* No longer supported */
+ return -ENOTSUPP;
+}
+
#define MSGSIZE(type) sizeof(struct type)
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
deleted file mode 100644
index 154884bf9275..000000000000
--- a/crypto/crypto_user_stat.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Crypto user configuration API.
- *
- * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
- *
- */
-
-#include <linux/crypto.h>
-#include <linux/cryptouser.h>
-#include <linux/sched.h>
-#include <net/netlink.h>
-#include <net/sock.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/rng.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/cryptouser.h>
-
-#include "internal.h"
-
-#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
-
-struct crypto_dump_info {
- struct sk_buff *in_skb;
- struct sk_buff *out_skb;
- u32 nlmsg_seq;
- u16 nlmsg_flags;
-};
-
-static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_aead raead;
-
- memset(&raead, 0, sizeof(raead));
-
- strscpy(raead.type, "aead", sizeof(raead.type));
-
- raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
- raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
- raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
- raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
- raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
-static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_cipher rcipher;
-
- memset(&rcipher, 0, sizeof(rcipher));
-
- strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
- rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
- rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
- rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
- rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
- rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_compress rcomp;
-
- memset(&rcomp, 0, sizeof(rcomp));
-
- strscpy(rcomp.type, "compression", sizeof(rcomp.type));
- rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
- rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
- rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
- rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
-static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_compress racomp;
-
- memset(&racomp, 0, sizeof(racomp));
-
- strscpy(racomp.type, "acomp", sizeof(racomp.type));
- racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
- racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
- racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
- racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_akcipher rakcipher;
-
- memset(&rakcipher, 0, sizeof(rakcipher));
-
- strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
- rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
- rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
- rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
- rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
- rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
- rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
- rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
- sizeof(rakcipher), &rakcipher);
-}
-
-static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_kpp rkpp;
-
- memset(&rkpp, 0, sizeof(rkpp));
-
- strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
- rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
- rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
- rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
- rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
-}
-
-static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, "ahash", sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, "shash", sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_rng rrng;
-
- memset(&rrng, 0, sizeof(rrng));
-
- strscpy(rrng.type, "rng", sizeof(rrng.type));
-
- rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
- rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
- rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
- rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
-}
-
-static int crypto_reportstat_one(struct crypto_alg *alg,
- struct crypto_user_alg *ualg,
- struct sk_buff *skb)
-{
- memset(ualg, 0, sizeof(*ualg));
-
- strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
- strscpy(ualg->cru_driver_name, alg->cra_driver_name,
- sizeof(ualg->cru_driver_name));
- strscpy(ualg->cru_module_name, module_name(alg->cra_module),
- sizeof(ualg->cru_module_name));
-
- ualg->cru_type = 0;
- ualg->cru_mask = 0;
- ualg->cru_flags = alg->cra_flags;
- ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
-
- if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
- goto nla_put_failure;
- if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
- struct crypto_stat_larval rl;
-
- memset(&rl, 0, sizeof(rl));
- strscpy(rl.type, "larval", sizeof(rl.type));
- if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
- goto nla_put_failure;
- goto out;
- }
-
- switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
- case CRYPTO_ALG_TYPE_AEAD:
- if (crypto_report_aead(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_SKCIPHER:
- if (crypto_report_cipher(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_CIPHER:
- if (crypto_report_cipher(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- if (crypto_report_comp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_ACOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_SCOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_AKCIPHER:
- if (crypto_report_akcipher(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_KPP:
- if (crypto_report_kpp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- if (crypto_report_ahash(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_HASH:
- if (crypto_report_shash(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_RNG:
- if (crypto_report_rng(skb, alg))
- goto nla_put_failure;
- break;
- default:
- pr_err("ERROR: Unhandled alg %d in %s\n",
- alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
- __func__);
- }
-
-out:
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int crypto_reportstat_alg(struct crypto_alg *alg,
- struct crypto_dump_info *info)
-{
- struct sk_buff *in_skb = info->in_skb;
- struct sk_buff *skb = info->out_skb;
- struct nlmsghdr *nlh;
- struct crypto_user_alg *ualg;
- int err = 0;
-
- nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
- CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
- if (!nlh) {
- err = -EMSGSIZE;
- goto out;
- }
-
- ualg = nlmsg_data(nlh);
-
- err = crypto_reportstat_one(alg, ualg, skb);
- if (err) {
- nlmsg_cancel(skb, nlh);
- goto out;
- }
-
- nlmsg_end(skb, nlh);
-
-out:
- return err;
-}
-
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- struct net *net = sock_net(in_skb->sk);
- struct crypto_user_alg *p = nlmsg_data(in_nlh);
- struct crypto_alg *alg;
- struct sk_buff *skb;
- struct crypto_dump_info info;
- int err;
-
- if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
- return -EINVAL;
-
- alg = crypto_alg_match(p, 0);
- if (!alg)
- return -ENOENT;
-
- err = -ENOMEM;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
- if (!skb)
- goto drop_alg;
-
- info.in_skb = in_skb;
- info.out_skb = skb;
- info.nlmsg_seq = in_nlh->nlmsg_seq;
- info.nlmsg_flags = 0;
-
- err = crypto_reportstat_alg(alg, &info);
-
-drop_alg:
- crypto_mod_put(alg);
-
- if (err) {
- kfree_skb(skb);
- return err;
- }
-
- return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 23c698b22013..a388f0ceb3a0 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
u8 *ctrblk = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
@@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned int bsize = crypto_cipher_blocksize(tfm);
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
@@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
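+ /* In-place operation: src and dst alias, so work through dst. */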
+ u8 *dst = walk->dst.virt.addr;
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
do {
/* create keystream */
fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
- crypto_xor(src, keystream, bsize);
+ crypto_xor(dst, keystream, bsize);
/* increment counter in counterblock */
crypto_inc(ctrblk, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -258,8 +258,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
+ struct skcipher_alg_common *alg;
u32 mask;
int err;
@@ -278,11 +278,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- alg = crypto_spawn_skcipher_alg(spawn);
+ alg = crypto_spawn_skcipher_alg_common(spawn);
/* We only support 16-byte blocks. */
err = -EINVAL;
- if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
+ if (alg->ivsize != CTR_RFC3686_BLOCK_SIZE)
goto err_free_inst;
/* Not a stream cipher? */
@@ -303,11 +303,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
- CTR_RFC3686_NONCE_SIZE;
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
- CTR_RFC3686_NONCE_SIZE;
+ inst->alg.chunksize = alg->chunksize;
+ inst->alg.min_keysize = alg->min_keysize + CTR_RFC3686_NONCE_SIZE;
+ inst->alg.max_keysize = alg->max_keysize + CTR_RFC3686_NONCE_SIZE;
inst->alg.setkey = crypto_rfc3686_setkey;
inst->alg.encrypt = crypto_rfc3686_crypt;
@@ -352,11 +350,11 @@ static void __exit crypto_ctr_module_exit(void)
ARRAY_SIZE(crypto_ctr_tmpls));
}
-subsys_initcall(crypto_ctr_module_init);
+module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/cts.c b/crypto/cts.c
index 3766d47ebcc0..48898d5e24ff 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -85,9 +85,9 @@ static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
return crypto_skcipher_setkey(child, key, keylen);
}
-static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
+static void cts_cbc_crypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (err == -EINPROGRESS)
return;
@@ -125,9 +125,9 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
return crypto_skcipher_encrypt(subreq);
}
-static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_cts_encrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (err)
goto out;
@@ -219,9 +219,9 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
return crypto_skcipher_decrypt(subreq);
}
-static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_cts_decrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (err)
goto out;
@@ -324,8 +324,8 @@ static void crypto_cts_free(struct skcipher_instance *inst)
static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
+ struct skcipher_alg_common *alg;
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
u32 mask;
int err;
@@ -344,10 +344,10 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alg = crypto_spawn_skcipher_alg(spawn);
+ alg = crypto_spawn_skcipher_alg_common(spawn);
err = -EINVAL;
- if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
+ if (alg->ivsize != alg->base.cra_blocksize)
goto err_free_inst;
if (strncmp(alg->base.cra_name, "cbc(", 4))
@@ -363,9 +363,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = alg->base.cra_blocksize;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+ inst->alg.chunksize = alg->chunksize;
+ inst->alg.min_keysize = alg->min_keysize;
+ inst->alg.max_keysize = alg->max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
@@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void)
crypto_unregister_template(&crypto_cts_tmpl);
}
-subsys_initcall(crypto_cts_module_init);
+module_init(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c
deleted file mode 100644
index d055b0784c77..000000000000
--- a/crypto/curve25519-generic.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <crypto/curve25519.h>
-#include <crypto/internal/kpp.h>
-#include <crypto/kpp.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-
-static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
-{
- u8 *secret = kpp_tfm_ctx(tfm);
-
- if (!len)
- curve25519_generate_secret(secret);
- else if (len == CURVE25519_KEY_SIZE &&
- crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
- memcpy(secret, buf, CURVE25519_KEY_SIZE);
- else
- return -EINVAL;
- return 0;
-}
-
-static int curve25519_compute_value(struct kpp_request *req)
-{
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- const u8 *secret = kpp_tfm_ctx(tfm);
- u8 public_key[CURVE25519_KEY_SIZE];
- u8 buf[CURVE25519_KEY_SIZE];
- int copied, nbytes;
- u8 const *bp;
-
- if (req->src) {
- copied = sg_copy_to_buffer(req->src,
- sg_nents_for_len(req->src,
- CURVE25519_KEY_SIZE),
- public_key, CURVE25519_KEY_SIZE);
- if (copied != CURVE25519_KEY_SIZE)
- return -EINVAL;
- bp = public_key;
- } else {
- bp = curve25519_base_point;
- }
-
- curve25519_generic(buf, secret, bp);
-
- /* might want less than we've got */
- nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
- copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
- nbytes),
- buf, nbytes);
- if (copied != nbytes)
- return -EINVAL;
- return 0;
-}
-
-static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
-{
- return CURVE25519_KEY_SIZE;
-}
-
-static struct kpp_alg curve25519_alg = {
- .base.cra_name = "curve25519",
- .base.cra_driver_name = "curve25519-generic",
- .base.cra_priority = 100,
- .base.cra_module = THIS_MODULE,
- .base.cra_ctxsize = CURVE25519_KEY_SIZE,
-
- .set_secret = curve25519_set_secret,
- .generate_public_key = curve25519_compute_value,
- .compute_shared_secret = curve25519_compute_value,
- .max_size = curve25519_max_size,
-};
-
-static int __init curve25519_init(void)
-{
- return crypto_register_kpp(&curve25519_alg);
-}
-
-static void __exit curve25519_exit(void)
-{
- crypto_unregister_kpp(&curve25519_alg);
-}
-
-subsys_initcall(curve25519_init);
-module_exit(curve25519_exit);
-
-MODULE_ALIAS_CRYPTO("curve25519");
-MODULE_ALIAS_CRYPTO("curve25519-generic");
-MODULE_LICENSE("GPL");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index b2a46f6dc961..a3e1fff55661 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -6,335 +6,256 @@
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- *
- * The default winbits of 11 should suit most packets, and it may be something
- * to configure on a per-tfm basis in the future.
- *
- * Currently, compression history is not maintained between tfm calls, as
- * it is not needed for IPCOMP and keeps the code simpler. It can be
- * implemented if someone wants it.
+ * Copyright (c) 2023 Google, LLC. <ardb@kernel.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
-struct deflate_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
+struct deflate_stream {
+ struct z_stream_s stream;
+ u8 workspace[];
};
-static int deflate_comp_init(struct deflate_ctx *ctx, int format)
+static DEFINE_MUTEX(deflate_stream_lock);
+
+static void *deflate_alloc_stream(void)
{
- int ret = 0;
- struct z_stream_s *stream = &ctx->comp_stream;
+ size_t size = max(zlib_inflate_workspacesize(),
+ zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
+ DEFLATE_DEF_MEMLEVEL));
+ struct deflate_stream *ctx;
- stream->workspace = vzalloc(zlib_deflate_workspacesize(
- MAX_WBITS, MAX_MEM_LEVEL));
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- if (format)
- ret = zlib_deflateInit(stream, 3);
- else
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS,
- DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+ ctx = kvmalloc(struct_size(ctx, workspace, size), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
-static int deflate_decomp_init(struct deflate_ctx *ctx, int format)
-{
- int ret = 0;
- struct z_stream_s *stream = &ctx->decomp_stream;
+ ctx->stream.workspace = ctx->workspace;
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- if (format)
- ret = zlib_inflateInit(stream);
- else
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
+ return ctx;
}
-static void deflate_comp_exit(struct deflate_ctx *ctx)
+static void deflate_free_stream(void *ctx)
{
- zlib_deflateEnd(&ctx->comp_stream);
- vfree(ctx->comp_stream.workspace);
+ kvfree(ctx);
}
-static void deflate_decomp_exit(struct deflate_ctx *ctx)
-{
- zlib_inflateEnd(&ctx->decomp_stream);
- vfree(ctx->decomp_stream.workspace);
-}
+static struct crypto_acomp_streams deflate_streams = {
+ .alloc_ctx = deflate_alloc_stream,
+ .free_ctx = deflate_free_stream,
+};
-static int __deflate_init(void *ctx, int format)
+static int deflate_compress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
+ struct z_stream_s *stream = &ds->stream;
+ struct acomp_walk walk;
int ret;
- ret = deflate_comp_init(ctx, format);
+ ret = acomp_walk_virt(&walk, req, true);
if (ret)
- goto out;
- ret = deflate_decomp_init(ctx, format);
- if (ret)
- deflate_comp_exit(ctx);
-out:
- return ret;
-}
-
-static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
-{
- struct deflate_ctx *ctx;
- int ret;
+ return ret;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ do {
+ unsigned int dcur;
- ret = __deflate_init(ctx, format);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur)
+ return -ENOSPC;
- return ctx;
-}
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
-static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
-{
- return gen_deflate_alloc_ctx(tfm, 0);
-}
+ do {
+ int flush = Z_FINISH;
+ unsigned int scur;
-static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm)
-{
- return gen_deflate_alloc_ctx(tfm, 1);
-}
+ stream->avail_in = 0;
+ stream->next_in = NULL;
-static int deflate_init(struct crypto_tfm *tfm)
-{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ if (acomp_walk_more_src(&walk, scur))
+ flush = Z_NO_FLUSH;
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
- return __deflate_init(ctx, 0);
-}
+ ret = zlib_deflate(stream, flush);
-static void __deflate_exit(void *ctx)
-{
- deflate_comp_exit(ctx);
- deflate_decomp_exit(ctx);
-}
+ if (scur) {
+ scur -= stream->avail_in;
+ acomp_walk_done_src(&walk, scur);
+ }
+ } while (ret == Z_OK && stream->avail_out);
-static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
-{
- __deflate_exit(ctx);
- kfree_sensitive(ctx);
-}
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK);
-static void deflate_exit(struct crypto_tfm *tfm)
-{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
- __deflate_exit(ctx);
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_compress(struct acomp_req *req)
{
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->comp_stream;
-
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
+
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
+
+ err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
+ err = deflate_compress_one(req, ds);
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_decompress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct z_stream_s *stream = &ds->stream;
+ bool out_of_space = false;
+ struct acomp_walk walk;
+ int ret;
- return __deflate_compress(src, slen, dst, dlen, dctx);
-}
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ return ret;
-static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __deflate_compress(src, slen, dst, dlen, ctx);
+ do {
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ out_of_space = true;
+ break;
+ }
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+ dcur -= stream->avail_out;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK && stream->avail_in);
+
+ if (scur)
+ acomp_walk_done_src(&walk, scur);
+
+ if (out_of_space)
+ return -ENOSPC;
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_decompress(struct acomp_req *req)
{
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->decomp_stream;
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an extra
- * byte when being used in the (undocumented) raw deflate mode.
- * (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- u8 zerostuff = 0;
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- }
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
+ err = deflate_decompress_one(req, ds);
+
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_init(struct crypto_acomp *tfm)
{
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ int ret;
- return __deflate_decompress(src, slen, dst, dlen, dctx);
-}
+ mutex_lock(&deflate_stream_lock);
+ ret = crypto_acomp_alloc_streams(&deflate_streams);
+ mutex_unlock(&deflate_stream_lock);
-static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __deflate_decompress(src, slen, dst, dlen, ctx);
+ return ret;
}
-static struct crypto_alg alg = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct deflate_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = deflate_init,
- .cra_exit = deflate_exit,
- .cra_u = { .compress = {
- .coa_compress = deflate_compress,
- .coa_decompress = deflate_decompress } }
+static struct acomp_alg acomp = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .init = deflate_init,
+ .base.cra_name = "deflate",
+ .base.cra_driver_name = "deflate-generic",
+ .base.cra_flags = CRYPTO_ALG_REQ_VIRT,
+ .base.cra_module = THIS_MODULE,
};
-static struct scomp_alg scomp[] = { {
- .alloc_ctx = deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
- .cra_module = THIS_MODULE,
- }
-}, {
- .alloc_ctx = zlib_deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "zlib-deflate",
- .cra_driver_name = "zlib-deflate-scomp",
- .cra_module = THIS_MODULE,
- }
-} };
-
static int __init deflate_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp));
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_acomp(&acomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_alg(&alg);
- crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
+ crypto_unregister_acomp(&acomp);
+ crypto_acomp_free_streams(&deflate_streams);
}
-subsys_initcall(deflate_mod_init);
+module_init(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>");
MODULE_ALIAS_CRYPTO("deflate");
+MODULE_ALIAS_CRYPTO("deflate-generic");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index c85354a5e94c..fce341400914 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -8,11 +8,11 @@
*/
#include <asm/byteorder.h>
+#include <crypto/algapi.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
#include <crypto/internal/des.h>
@@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void)
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
-subsys_initcall(des_generic_mod_init);
+module_init(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/df_sp80090a.c b/crypto/df_sp80090a.c
new file mode 100644
index 000000000000..dc63b31a93fc
--- /dev/null
+++ b/crypto/df_sp80090a.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
+
+static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
+ const unsigned char *key,
+ u8 keylen);
+static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
+ const unsigned char *key, u8 keylen)
+{
+ aes_expandkey(aesctx, key, keylen);
+}
+
+static void drbg_kcapi_sym(struct crypto_aes_ctx *aesctx,
+ unsigned char *outval,
+ const struct drbg_string *in, u8 blocklen_bytes)
+{
+	/* there is only one component in *in */
+ BUG_ON(in->len < blocklen_bytes);
+ aes_encrypt(aesctx, outval, in->buf);
+}
+
+/* BCC function for CTR DRBG as defined in 10.4.3 */
+
+static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
+ unsigned char *out, const unsigned char *key,
+ struct list_head *in,
+ u8 blocklen_bytes,
+ u8 keylen)
+{
+ struct drbg_string *curr = NULL;
+ struct drbg_string data;
+ short cnt = 0;
+
+ drbg_string_fill(&data, out, blocklen_bytes);
+
+ /* 10.4.3 step 2 / 4 */
+ drbg_kcapi_symsetkey(aesctx, key, keylen);
+ list_for_each_entry(curr, in, list) {
+ const unsigned char *pos = curr->buf;
+ size_t len = curr->len;
+ /* 10.4.3 step 4.1 */
+ while (len) {
+ /* 10.4.3 step 4.2 */
+ if (blocklen_bytes == cnt) {
+ cnt = 0;
+ drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
+ }
+ out[cnt] ^= *pos;
+ pos++;
+ cnt++;
+ len--;
+ }
+ }
+ /* 10.4.3 step 4.2 for last block */
+ if (cnt)
+ drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
+}
+
+/*
+ * scratchpad usage: drbg_ctr_update is interlinked with crypto_drbg_ctr_df
+ * (and drbg_ctr_bcc, but that function does not need any temporary buffers);
+ * the scratchpad is used as follows:
+ * drbg_ctr_update:
+ * temp
+ * start: drbg->scratchpad
+ * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * note: the cipher writing into this variable works
+ * blocklen-wise. Now, when the statelen is not a multiple
+ *		of blocklen, the generation loop below "spills over"
+ * by at most blocklen. Thus, we need to give sufficient
+ * memory.
+ * df_data
+ * start: drbg->scratchpad +
+ * drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * length: drbg_statelen(drbg)
+ *
+ * crypto_drbg_ctr_df:
+ * pad
+ * start: df_data + drbg_statelen(drbg)
+ * length: drbg_blocklen(drbg)
+ * iv
+ * start: pad + drbg_blocklen(drbg)
+ * length: drbg_blocklen(drbg)
+ * temp
+ * start: iv + drbg_blocklen(drbg)
+ *		length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * note: temp is the buffer that the BCC function operates
+ * on. BCC operates blockwise. drbg_statelen(drbg)
+ * is sufficient when the DRBG state length is a multiple
+ * of the block size. For AES192 (and maybe other ciphers)
+ * this is not correct and the length for temp is
+ * insufficient (yes, that also means for such ciphers,
+ *		the final output of all BCC rounds is truncated).
+ * Therefore, add drbg_blocklen(drbg) to cover all
+ * possibilities.
+ * refer to crypto_drbg_ctr_df_datalen() to get the required length
+ */
+
+/* Derivation Function for CTR DRBG as defined in 10.4.2 */
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
+ unsigned char *df_data, size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen)
+{
+ unsigned char L_N[8];
+ /* S3 is input */
+ struct drbg_string S1, S2, S4, cipherin;
+ LIST_HEAD(bcc_list);
+ unsigned char *pad = df_data + statelen;
+ unsigned char *iv = pad + blocklen_bytes;
+ unsigned char *temp = iv + blocklen_bytes;
+ size_t padlen = 0;
+ unsigned int templen = 0;
+ /* 10.4.2 step 7 */
+ unsigned int i = 0;
+ /* 10.4.2 step 8 */
+ const unsigned char *K = (unsigned char *)
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
+ unsigned char *X;
+ size_t generated_len = 0;
+ size_t inputlen = 0;
+ struct drbg_string *seed = NULL;
+ u8 keylen;
+
+ memset(pad, 0, blocklen_bytes);
+ memset(iv, 0, blocklen_bytes);
+ keylen = statelen - blocklen_bytes;
+ /* 10.4.2 step 1 is implicit as we work byte-wise */
+
+ /* 10.4.2 step 2 */
+ if ((512 / 8) < bytes_to_return)
+ return -EINVAL;
+
+ /* 10.4.2 step 2 -- calculate the entire length of all input data */
+ list_for_each_entry(seed, seedlist, list)
+ inputlen += seed->len;
+ drbg_cpu_to_be32(inputlen, &L_N[0]);
+
+ /* 10.4.2 step 3 */
+ drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
+
+ /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
+ padlen = (inputlen + sizeof(L_N) + 1) % (blocklen_bytes);
+ /* wrap the padlen appropriately */
+ if (padlen)
+ padlen = blocklen_bytes - padlen;
+ /*
+ * pad / padlen contains the 0x80 byte and the following zero bytes.
+ * As the calculated padlen value only covers the number of zero
+ * bytes, this value has to be incremented by one for the 0x80 byte.
+ */
+ padlen++;
+ pad[0] = 0x80;
+
+ /* 10.4.2 step 4 -- first fill the linked list and then order it */
+ drbg_string_fill(&S1, iv, blocklen_bytes);
+ list_add_tail(&S1.list, &bcc_list);
+ drbg_string_fill(&S2, L_N, sizeof(L_N));
+ list_add_tail(&S2.list, &bcc_list);
+ list_splice_tail(seedlist, &bcc_list);
+ drbg_string_fill(&S4, pad, padlen);
+ list_add_tail(&S4.list, &bcc_list);
+
+ /* 10.4.2 step 9 */
+ while (templen < (keylen + (blocklen_bytes))) {
+ /*
+ * 10.4.2 step 9.1 - the padding is implicit as the buffer
+ * holds zeros after allocation -- even the increment of i
+ * is irrelevant as the increment remains within length of i
+ */
+ drbg_cpu_to_be32(i, iv);
+ /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
+ drbg_ctr_bcc(aesctx, temp + templen, K, &bcc_list,
+ blocklen_bytes, keylen);
+ /* 10.4.2 step 9.3 */
+ i++;
+ templen += blocklen_bytes;
+ }
+
+ /* 10.4.2 step 11 */
+ X = temp + (keylen);
+ drbg_string_fill(&cipherin, X, blocklen_bytes);
+
+ /* 10.4.2 step 12: overwriting of outval is implemented in next step */
+
+ /* 10.4.2 step 13 */
+ drbg_kcapi_symsetkey(aesctx, temp, keylen);
+ while (generated_len < bytes_to_return) {
+ short blocklen = 0;
+ /*
+ * 10.4.2 step 13.1: the truncation of the key length is
+ * implicit as the key is only drbg_blocklen in size based on
+ * the implementation of the cipher function callback
+ */
+ drbg_kcapi_sym(aesctx, X, &cipherin, blocklen_bytes);
+ blocklen = (blocklen_bytes <
+ (bytes_to_return - generated_len)) ?
+ blocklen_bytes :
+ (bytes_to_return - generated_len);
+ /* 10.4.2 step 13.2 and 14 */
+ memcpy(df_data + generated_len, X, blocklen);
+ generated_len += blocklen;
+ }
+
+ memset(iv, 0, blocklen_bytes);
+ memset(temp, 0, statelen + blocklen_bytes);
+ memset(pad, 0, blocklen_bytes);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_drbg_ctr_df);
+
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Derivation Function conformant to SP800-90A");
diff --git a/crypto/dh.c b/crypto/dh.c
index 99c3b2ef7adc..8250eeeebd0f 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -106,6 +106,12 @@ err_clear_ctx:
*/
static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
{
+ MPI val, q;
+ int ret;
+
+ if (!fips_enabled)
+ return 0;
+
if (unlikely(!ctx->p))
return -EINVAL;
@@ -125,40 +131,35 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
*
* For the safe-prime groups q = (p - 1)/2.
*/
- if (fips_enabled) {
- MPI val, q;
- int ret;
+ val = mpi_alloc(0);
+ if (!val)
+ return -ENOMEM;
- val = mpi_alloc(0);
- if (!val)
- return -ENOMEM;
+ q = mpi_alloc(mpi_get_nlimbs(ctx->p));
+ if (!q) {
+ mpi_free(val);
+ return -ENOMEM;
+ }
- q = mpi_alloc(mpi_get_nlimbs(ctx->p));
- if (!q) {
- mpi_free(val);
- return -ENOMEM;
- }
+ /*
+ * ->p is odd, so no need to explicitly subtract one
+ * from it before shifting to the right.
+ */
+ ret = mpi_rshift(q, ctx->p, 1) ?:
+ mpi_powm(val, y, q, ctx->p);
- /*
- * ->p is odd, so no need to explicitly subtract one
- * from it before shifting to the right.
- */
- mpi_rshift(q, ctx->p, 1);
-
- ret = mpi_powm(val, y, q, ctx->p);
- mpi_free(q);
- if (ret) {
- mpi_free(val);
- return ret;
- }
+ mpi_free(q);
+ if (ret) {
+ mpi_free(val);
+ return ret;
+ }
- ret = mpi_cmp_ui(val, 1);
+ ret = mpi_cmp_ui(val, 1);
- mpi_free(val);
+ mpi_free(val);
- if (ret != 0)
- return -EINVAL;
- }
+ if (ret != 0)
+ return -EINVAL;
return 0;
}
@@ -318,6 +319,9 @@ static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm)
if (IS_ERR(tfm_ctx->dh_tfm))
return PTR_ERR(tfm_ctx->dh_tfm);
+ kpp_set_reqsize(tfm, sizeof(struct kpp_request) +
+ crypto_kpp_reqsize(tfm_ctx->dh_tfm));
+
return 0;
}
@@ -500,10 +504,9 @@ out:
return err;
}
-static void dh_safe_prime_complete_req(struct crypto_async_request *dh_req,
- int err)
+static void dh_safe_prime_complete_req(void *data, int err)
{
- struct kpp_request *req = dh_req->data;
+ struct kpp_request *req = data;
kpp_request_complete(req, err);
}
@@ -593,7 +596,6 @@ static int __maybe_unused __dh_safe_prime_create(
inst->alg.max_size = dh_safe_prime_max_size;
inst->alg.init = dh_safe_prime_init_tfm;
inst->alg.exit = dh_safe_prime_exit_tfm;
- inst->alg.reqsize = sizeof(struct kpp_request) + dh_alg->reqsize;
inst->alg.base.cra_priority = dh_alg->base.cra_priority;
inst->alg.base.cra_module = THIS_MODULE;
inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx);
@@ -918,7 +920,7 @@ static void __exit dh_exit(void)
crypto_unregister_kpp(&dh);
}
-subsys_initcall(dh_init);
+module_init(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 982d4ca4526d..1d433dae9955 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,9 +98,11 @@
*/
#include <crypto/drbg.h>
+#include <crypto/df_sp80090a.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
+#include <linux/string_choices.h>
/***************************************************************
* Backend cipher definitions available to DRBG
@@ -111,9 +113,9 @@
* as stdrng. Each DRBG receives an increasing cra_priority values the later
* they are defined in this array (see drbg_fill_array).
*
- * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
- * the SHA256 / AES 256 over other ciphers. Thus, the favored
- * DRBGs are the latest entries in this array.
+ * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and the
+ * HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the
+ * favored DRBGs are the latest entries in this array.
*/
static const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
@@ -139,12 +141,6 @@ static const struct drbg_core drbg_cores[] = {
#endif /* CONFIG_CRYPTO_DRBG_CTR */
#ifdef CONFIG_CRYPTO_DRBG_HASH
{
- .flags = DRBG_HASH | DRBG_STRENGTH128,
- .statelen = 55, /* 440 bits */
- .blocklen_bytes = 20,
- .cra_name = "sha1",
- .backend_cra_name = "sha1",
- }, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
.blocklen_bytes = 48,
@@ -166,12 +162,6 @@ static const struct drbg_core drbg_cores[] = {
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
{
- .flags = DRBG_HMAC | DRBG_STRENGTH128,
- .statelen = 20, /* block length of cipher */
- .blocklen_bytes = 20,
- .cra_name = "hmac_sha1",
- .backend_cra_name = "hmac(sha1)",
- }, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 48, /* block length of cipher */
.blocklen_bytes = 48,
@@ -272,26 +262,6 @@ static int drbg_fips_continuous_test(struct drbg_state *drbg,
return 0;
}
-/*
- * Convert an integer into a byte representation of this integer.
- * The byte representation is big-endian
- *
- * @val value to be converted
- * @buf buffer holding the converted integer -- caller must ensure that
- * buffer size is at least 32 bit
- */
-#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
-static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
-{
- struct s {
- __be32 conv;
- };
- struct s *conversion = (struct s *) buf;
-
- conversion->conv = cpu_to_be32(val);
-}
-#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
-
/******************************************************************
* CTR DRBG callback functions
******************************************************************/
@@ -305,10 +275,6 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
-static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
- const unsigned char *key);
-static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
- const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
@@ -316,202 +282,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *outbuf, u32 outlen);
#define DRBG_OUTSCRATCHLEN 256
-/* BCC function for CTR DRBG as defined in 10.4.3 */
-static int drbg_ctr_bcc(struct drbg_state *drbg,
- unsigned char *out, const unsigned char *key,
- struct list_head *in)
-{
- int ret = 0;
- struct drbg_string *curr = NULL;
- struct drbg_string data;
- short cnt = 0;
-
- drbg_string_fill(&data, out, drbg_blocklen(drbg));
-
- /* 10.4.3 step 2 / 4 */
- drbg_kcapi_symsetkey(drbg, key);
- list_for_each_entry(curr, in, list) {
- const unsigned char *pos = curr->buf;
- size_t len = curr->len;
- /* 10.4.3 step 4.1 */
- while (len) {
- /* 10.4.3 step 4.2 */
- if (drbg_blocklen(drbg) == cnt) {
- cnt = 0;
- ret = drbg_kcapi_sym(drbg, out, &data);
- if (ret)
- return ret;
- }
- out[cnt] ^= *pos;
- pos++;
- cnt++;
- len--;
- }
- }
- /* 10.4.3 step 4.2 for last block */
- if (cnt)
- ret = drbg_kcapi_sym(drbg, out, &data);
-
- return ret;
-}
-
-/*
- * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
- * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
- * the scratchpad is used as follows:
- * drbg_ctr_update:
- * temp
- * start: drbg->scratchpad
- * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
- * note: the cipher writing into this variable works
- * blocklen-wise. Now, when the statelen is not a multiple
- * of blocklen, the generateion loop below "spills over"
- * by at most blocklen. Thus, we need to give sufficient
- * memory.
- * df_data
- * start: drbg->scratchpad +
- * drbg_statelen(drbg) + drbg_blocklen(drbg)
- * length: drbg_statelen(drbg)
- *
- * drbg_ctr_df:
- * pad
- * start: df_data + drbg_statelen(drbg)
- * length: drbg_blocklen(drbg)
- * iv
- * start: pad + drbg_blocklen(drbg)
- * length: drbg_blocklen(drbg)
- * temp
- * start: iv + drbg_blocklen(drbg)
- * length: drbg_satelen(drbg) + drbg_blocklen(drbg)
- * note: temp is the buffer that the BCC function operates
- * on. BCC operates blockwise. drbg_statelen(drbg)
- * is sufficient when the DRBG state length is a multiple
- * of the block size. For AES192 (and maybe other ciphers)
- * this is not correct and the length for temp is
- * insufficient (yes, that also means for such ciphers,
- * the final output of all BCC rounds are truncated).
- * Therefore, add drbg_blocklen(drbg) to cover all
- * possibilities.
- */
-
-/* Derivation Function for CTR DRBG as defined in 10.4.2 */
static int drbg_ctr_df(struct drbg_state *drbg,
unsigned char *df_data, size_t bytes_to_return,
struct list_head *seedlist)
{
- int ret = -EFAULT;
- unsigned char L_N[8];
- /* S3 is input */
- struct drbg_string S1, S2, S4, cipherin;
- LIST_HEAD(bcc_list);
- unsigned char *pad = df_data + drbg_statelen(drbg);
- unsigned char *iv = pad + drbg_blocklen(drbg);
- unsigned char *temp = iv + drbg_blocklen(drbg);
- size_t padlen = 0;
- unsigned int templen = 0;
- /* 10.4.2 step 7 */
- unsigned int i = 0;
- /* 10.4.2 step 8 */
- const unsigned char *K = (unsigned char *)
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
- unsigned char *X;
- size_t generated_len = 0;
- size_t inputlen = 0;
- struct drbg_string *seed = NULL;
-
- memset(pad, 0, drbg_blocklen(drbg));
- memset(iv, 0, drbg_blocklen(drbg));
-
- /* 10.4.2 step 1 is implicit as we work byte-wise */
-
- /* 10.4.2 step 2 */
- if ((512/8) < bytes_to_return)
- return -EINVAL;
-
- /* 10.4.2 step 2 -- calculate the entire length of all input data */
- list_for_each_entry(seed, seedlist, list)
- inputlen += seed->len;
- drbg_cpu_to_be32(inputlen, &L_N[0]);
-
- /* 10.4.2 step 3 */
- drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
-
- /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
- padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
- /* wrap the padlen appropriately */
- if (padlen)
- padlen = drbg_blocklen(drbg) - padlen;
- /*
- * pad / padlen contains the 0x80 byte and the following zero bytes.
- * As the calculated padlen value only covers the number of zero
- * bytes, this value has to be incremented by one for the 0x80 byte.
- */
- padlen++;
- pad[0] = 0x80;
-
- /* 10.4.2 step 4 -- first fill the linked list and then order it */
- drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
- list_add_tail(&S1.list, &bcc_list);
- drbg_string_fill(&S2, L_N, sizeof(L_N));
- list_add_tail(&S2.list, &bcc_list);
- list_splice_tail(seedlist, &bcc_list);
- drbg_string_fill(&S4, pad, padlen);
- list_add_tail(&S4.list, &bcc_list);
-
- /* 10.4.2 step 9 */
- while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
- /*
- * 10.4.2 step 9.1 - the padding is implicit as the buffer
- * holds zeros after allocation -- even the increment of i
- * is irrelevant as the increment remains within length of i
- */
- drbg_cpu_to_be32(i, iv);
- /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
- ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
- if (ret)
- goto out;
- /* 10.4.2 step 9.3 */
- i++;
- templen += drbg_blocklen(drbg);
- }
-
- /* 10.4.2 step 11 */
- X = temp + (drbg_keylen(drbg));
- drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
-
- /* 10.4.2 step 12: overwriting of outval is implemented in next step */
-
- /* 10.4.2 step 13 */
- drbg_kcapi_symsetkey(drbg, temp);
- while (generated_len < bytes_to_return) {
- short blocklen = 0;
- /*
- * 10.4.2 step 13.1: the truncation of the key length is
- * implicit as the key is only drbg_blocklen in size based on
- * the implementation of the cipher function callback
- */
- ret = drbg_kcapi_sym(drbg, X, &cipherin);
- if (ret)
- goto out;
- blocklen = (drbg_blocklen(drbg) <
- (bytes_to_return - generated_len)) ?
- drbg_blocklen(drbg) :
- (bytes_to_return - generated_len);
- /* 10.4.2 step 13.2 and 14 */
- memcpy(df_data + generated_len, X, blocklen);
- generated_len += blocklen;
- }
-
- ret = 0;
-
-out:
- memset(iv, 0, drbg_blocklen(drbg));
- memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
- memset(pad, 0, drbg_blocklen(drbg));
- return ret;
+ return crypto_drbg_ctr_df(drbg->priv_data, df_data, drbg_statelen(drbg),
+ seedlist, drbg_blocklen(drbg), drbg_statelen(drbg));
}
/*
@@ -648,8 +424,6 @@ MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
-MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
-MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");
/* update function of HMAC DRBG as defined in 10.1.2.2 */
static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
@@ -768,8 +542,6 @@ MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
-MODULE_ALIAS_CRYPTO("drbg_pr_sha1");
-MODULE_ALIAS_CRYPTO("drbg_nopr_sha1");
/*
* Increment buffer
@@ -1325,10 +1097,8 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
sb_size = 0;
else if (drbg->core->flags & DRBG_CTR)
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
- drbg_statelen(drbg) + /* df_data */
- drbg_blocklen(drbg) + /* pad */
- drbg_blocklen(drbg) + /* iv */
- drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
+ crypto_drbg_ctr_df_datalen(drbg_statelen(drbg),
+ drbg_blocklen(drbg));
else
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
@@ -1428,7 +1198,7 @@ static int drbg_generate(struct drbg_state *drbg,
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
- drbg->pr ? "true" : "false",
+ str_true_false(drbg->pr),
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
@@ -1475,11 +1245,11 @@ static int drbg_generate(struct drbg_state *drbg,
int err = 0;
pr_devel("DRBG: start to perform self test\n");
if (drbg->core->flags & DRBG_HMAC)
- err = alg_test("drbg_pr_hmac_sha256",
- "drbg_pr_hmac_sha256", 0, 0);
+ err = alg_test("drbg_pr_hmac_sha512",
+ "drbg_pr_hmac_sha512", 0, 0);
else if (drbg->core->flags & DRBG_CTR)
- err = alg_test("drbg_pr_ctr_aes128",
- "drbg_pr_ctr_aes128", 0, 0);
+ err = alg_test("drbg_pr_ctr_aes256",
+ "drbg_pr_ctr_aes256", 0, 0);
else
err = alg_test("drbg_pr_sha256",
"drbg_pr_sha256", 0, 0);
@@ -1546,7 +1316,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
const int err = PTR_ERR(drbg->jent);
drbg->jent = NULL;
- if (fips_enabled || err != -ENOENT)
+ if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}
@@ -1578,7 +1348,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
bool reseed = true;
pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
- "%s\n", coreref, pr ? "enabled" : "disabled");
+ "%s\n", coreref, str_enabled_disabled(pr));
mutex_lock(&drbg->drbg_mutex);
/* 9.1 step 1 is implicit with the selected DRBG type */
@@ -1673,7 +1443,6 @@ static void drbg_kcapi_set_entropy(struct crypto_rng *tfm,
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
struct sdesc {
struct shash_desc shash;
- char ctx[];
};
static int drbg_init_hash_kernel(struct drbg_state *drbg)
@@ -1698,7 +1467,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
sdesc->shash.tfm = tfm;
drbg->priv_data = sdesc;
- return crypto_shash_alignmask(tfm);
+ return 0;
}
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
@@ -1736,10 +1505,9 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_cipher *tfm =
- (struct crypto_cipher *)drbg->priv_data;
- if (tfm)
- crypto_free_cipher(tfm);
+ struct crypto_aes_ctx *aesctx = (struct crypto_aes_ctx *)drbg->priv_data;
+
+ kfree(aesctx);
drbg->priv_data = NULL;
if (drbg->ctr_handle)
@@ -1758,20 +1526,16 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_cipher *tfm;
+ struct crypto_aes_ctx *aesctx;
struct crypto_skcipher *sk_tfm;
struct skcipher_request *req;
unsigned int alignmask;
char ctr_name[CRYPTO_MAX_ALG_NAME];
- tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
- if (IS_ERR(tfm)) {
- pr_info("DRBG: could not allocate cipher TFM handle: %s\n",
- drbg->core->backend_cra_name);
- return PTR_ERR(tfm);
- }
- BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
- drbg->priv_data = tfm;
+ aesctx = kzalloc(sizeof(*aesctx), GFP_KERNEL);
+ if (!aesctx)
+ return -ENOMEM;
+ drbg->priv_data = aesctx;
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
@@ -1815,25 +1579,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return alignmask;
}
-static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
- const unsigned char *key)
-{
- struct crypto_cipher *tfm = drbg->priv_data;
-
- crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
-}
-
-static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
- const struct drbg_string *in)
-{
- struct crypto_cipher *tfm = drbg->priv_data;
-
- /* there is only component in *in */
- BUG_ON(in->len < drbg_blocklen(drbg));
- crypto_cipher_encrypt_one(tfm, outval, in->buf);
- return 0;
-}
-
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
@@ -2017,11 +1762,13 @@ static inline int __init drbg_healthcheck_sanity(void)
return 0;
#ifdef CONFIG_CRYPTO_DRBG_CTR
- drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
-#elif defined CONFIG_CRYPTO_DRBG_HASH
+ drbg_convert_tfm_core("drbg_nopr_ctr_aes256", &coreref, &pr);
+#endif
+#ifdef CONFIG_CRYPTO_DRBG_HASH
drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
-#else
- drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
+#endif
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+ drbg_convert_tfm_core("drbg_nopr_hmac_sha512", &coreref, &pr);
#endif
drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
@@ -2145,7 +1892,7 @@ static void __exit drbg_exit(void)
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
-subsys_initcall(drbg_init);
+module_init(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
@@ -2164,4 +1911,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
CRYPTO_DRBG_HMAC_STRING
CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 71fbb0543d64..cd1b20456dad 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -5,75 +5,198 @@
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
-static int crypto_ecb_crypt(struct skcipher_request *req,
- struct crypto_cipher *cipher,
+static int crypto_ecb_crypt(struct crypto_cipher *cipher, const u8 *src,
+ u8 *dst, unsigned nbytes, bool final,
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
const unsigned int bsize = crypto_cipher_blocksize(cipher);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes) != 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
+ while (nbytes >= bsize) {
+ fn(crypto_cipher_tfm(cipher), dst, src);
- do {
- fn(crypto_cipher_tfm(cipher), dst, src);
+ src += bsize;
+ dst += bsize;
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- err = skcipher_walk_done(&walk, nbytes);
+ nbytes -= bsize;
}
- return err;
+ return nbytes && final ? -EINVAL : nbytes;
}
-static int crypto_ecb_encrypt(struct skcipher_request *req)
+static int crypto_ecb_encrypt2(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher *cipher = *ctx;
- return crypto_ecb_crypt(req, cipher,
+ return crypto_ecb_crypt(cipher, src, dst, len,
+ flags & CRYPTO_LSKCIPHER_FLAG_FINAL,
crypto_cipher_alg(cipher)->cia_encrypt);
}
-static int crypto_ecb_decrypt(struct skcipher_request *req)
+static int crypto_ecb_decrypt2(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher *cipher = *ctx;
- return crypto_ecb_crypt(req, cipher,
+ return crypto_ecb_crypt(cipher, src, dst, len,
+ flags & CRYPTO_LSKCIPHER_FLAG_FINAL,
crypto_cipher_alg(cipher)->cia_decrypt);
}
-static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int lskcipher_setkey_simple2(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher *cipher = *ctx;
+
+ crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ return crypto_cipher_setkey(cipher, key, keylen);
+}
+
+static int lskcipher_init_tfm_simple2(struct crypto_lskcipher *tfm)
+{
+ struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher_spawn *spawn;
+ struct crypto_cipher *cipher;
+
+ spawn = lskcipher_instance_ctx(inst);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ *ctx = cipher;
+ return 0;
+}
+
+static void lskcipher_exit_tfm_simple2(struct crypto_lskcipher *tfm)
+{
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ crypto_free_cipher(*ctx);
+}
+
+static void lskcipher_free_instance_simple2(struct lskcipher_instance *inst)
+{
+ crypto_drop_cipher(lskcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static struct lskcipher_instance *lskcipher_alloc_instance_simple2(
+ struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_cipher_spawn *spawn;
+ struct lskcipher_instance *inst;
+ struct crypto_alg *cipher_alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
+ if (err)
+ return ERR_PTR(err);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
+ spawn = lskcipher_instance_ctx(inst);
+
+ err = crypto_grab_cipher(spawn, lskcipher_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+ cipher_alg = crypto_spawn_cipher_alg(spawn);
+
+ err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
+ cipher_alg);
+ if (err)
+ goto err_free_inst;
+
+ inst->free = lskcipher_free_instance_simple2;
+
+ /* Default algorithm properties, can be overridden */
+ inst->alg.co.base.cra_blocksize = cipher_alg->cra_blocksize;
+ inst->alg.co.base.cra_alignmask = cipher_alg->cra_alignmask;
+ inst->alg.co.base.cra_priority = cipher_alg->cra_priority;
+ inst->alg.co.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
+ inst->alg.co.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
+ inst->alg.co.ivsize = cipher_alg->cra_blocksize;
+
+ /* Use struct crypto_cipher * by default, can be overridden */
+ inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_cipher *);
+ inst->alg.setkey = lskcipher_setkey_simple2;
+ inst->alg.init = lskcipher_init_tfm_simple2;
+ inst->alg.exit = lskcipher_exit_tfm_simple2;
+
+ return inst;
+
+err_free_inst:
+ lskcipher_free_instance_simple2(inst);
+ return ERR_PTR(err);
+}
+
+static int crypto_ecb_create2(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct skcipher_instance *inst;
+ struct lskcipher_instance *inst;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb);
+ inst = lskcipher_alloc_instance_simple2(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
- inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */
+ /* ECB mode doesn't take an IV */
+ inst->alg.co.ivsize = 0;
+
+ inst->alg.encrypt = crypto_ecb_encrypt2;
+ inst->alg.decrypt = crypto_ecb_decrypt2;
+
+ err = lskcipher_register_instance(tmpl, inst);
+ if (err)
+ inst->free(inst);
+
+ return err;
+}
+
+static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_lskcipher_spawn *spawn;
+ struct lskcipher_alg *cipher_alg;
+ struct lskcipher_instance *inst;
+ int err;
+
+ inst = lskcipher_alloc_instance_simple(tmpl, tb);
+ if (IS_ERR(inst)) {
+ err = crypto_ecb_create2(tmpl, tb);
+ return err;
+ }
+
+ spawn = lskcipher_instance_ctx(inst);
+ cipher_alg = crypto_lskcipher_spawn_alg(spawn);
+
+ /* ECB mode doesn't take an IV */
+ inst->alg.co.ivsize = 0;
+ if (cipher_alg->co.ivsize)
+ return -EINVAL;
- inst->alg.encrypt = crypto_ecb_encrypt;
- inst->alg.decrypt = crypto_ecb_decrypt;
+ inst->alg.co.base.cra_ctxsize = cipher_alg->co.base.cra_ctxsize;
+ inst->alg.setkey = cipher_alg->setkey;
+ inst->alg.encrypt = cipher_alg->encrypt;
+ inst->alg.decrypt = cipher_alg->decrypt;
+ inst->alg.init = cipher_alg->init;
+ inst->alg.exit = cipher_alg->exit;
- err = skcipher_register_instance(tmpl, inst);
+ err = lskcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
@@ -96,9 +219,10 @@ static void __exit crypto_ecb_module_exit(void)
crypto_unregister_template(&crypto_ecb_tmpl);
}
-subsys_initcall(crypto_ecb_module_init);
+module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ecb");
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 7315217c8f73..6cf9a945fc6c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -33,7 +33,7 @@
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/ecc.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ratelimit.h>
#include "ecc_curve_defs.h"
@@ -60,12 +60,36 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
return &nist_p256;
case ECC_CURVE_NIST_P384:
return &nist_p384;
+ case ECC_CURVE_NIST_P521:
+ return &nist_p521;
default:
return NULL;
}
}
EXPORT_SYMBOL(ecc_get_curve);
+void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ u64 *out, unsigned int ndigits)
+{
+ int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64));
+ unsigned int o = nbytes & 7;
+ __be64 msd = 0;
+
+ /* diff > 0: not enough input bytes: set most significant digits to 0 */
+ if (diff > 0) {
+ ndigits -= diff;
+ memset(&out[ndigits], 0, diff * sizeof(u64));
+ }
+
+ if (o) {
+ memcpy((u8 *)&msd + sizeof(msd) - o, in, o);
+ out[--ndigits] = be64_to_cpu(msd);
+ in += o;
+ }
+ ecc_swap_digits(in, out, ndigits);
+}
+EXPORT_SYMBOL(ecc_digits_from_bytes);
+
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
size_t len = ndigits * sizeof(u64);
@@ -689,7 +713,7 @@ static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
static void vli_mmod_fast_192(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
- const unsigned int ndigits = 3;
+ const unsigned int ndigits = ECC_CURVE_NIST_P192_DIGITS;
int carry;
vli_set(result, product, ndigits);
@@ -717,7 +741,7 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
int carry;
- const unsigned int ndigits = 4;
+ const unsigned int ndigits = ECC_CURVE_NIST_P256_DIGITS;
/* t */
vli_set(result, product, ndigits);
@@ -800,7 +824,7 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
int carry;
- const unsigned int ndigits = 6;
+ const unsigned int ndigits = ECC_CURVE_NIST_P384_DIGITS;
/* t */
vli_set(result, product, ndigits);
@@ -902,6 +926,28 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product,
#undef AND64H
#undef AND64L
+/*
+ * Computes result = product % curve_prime
+ * from "Recommendations for Discrete Logarithm-Based Cryptography:
+ * Elliptic Curve Domain Parameters" section G.1.4
+ */
+static void vli_mmod_fast_521(u64 *result, const u64 *product,
+ const u64 *curve_prime, u64 *tmp)
+{
+ const unsigned int ndigits = ECC_CURVE_NIST_P521_DIGITS;
+ size_t i;
+
+ /* Initialize result with lowest 521 bits from product */
+ vli_set(result, product, ndigits);
+ result[8] &= 0x1ff;
+
+ for (i = 0; i < ndigits; i++)
+ tmp[i] = (product[8 + i] >> 9) | (product[9 + i] << 55);
+ tmp[8] &= 0x1ff;
+
+ vli_mod_add(result, result, tmp, curve_prime, ndigits);
+}
+
/* Computes result = product % curve_prime for different curve_primes.
*
* Note that curve_primes are distinguished just by heuristic check and
@@ -932,15 +978,18 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
}
switch (ndigits) {
- case 3:
+ case ECC_CURVE_NIST_P192_DIGITS:
vli_mmod_fast_192(result, product, curve_prime, tmp);
break;
- case 4:
+ case ECC_CURVE_NIST_P256_DIGITS:
vli_mmod_fast_256(result, product, curve_prime, tmp);
break;
- case 6:
+ case ECC_CURVE_NIST_P384_DIGITS:
vli_mmod_fast_384(result, product, curve_prime, tmp);
break;
+ case ECC_CURVE_NIST_P521_DIGITS:
+ vli_mmod_fast_521(result, product, curve_prime, tmp);
+ break;
default:
pr_err_ratelimited("ecc: unsupported digits size!\n");
return false;
@@ -1295,7 +1344,10 @@ static void ecc_point_mult(struct ecc_point *result,
carry = vli_add(sk[0], scalar, curve->n, ndigits);
vli_add(sk[1], sk[0], curve->n, ndigits);
scalar = sk[!carry];
- num_bits = sizeof(u64) * ndigits * 8 + 1;
+ if (curve->nbits == 521) /* NIST P521 */
+ num_bits = curve->nbits + 2;
+ else
+ num_bits = sizeof(u64) * ndigits * 8 + 1;
vli_set(rx[1], point->x, ndigits);
vli_set(ry[1], point->y, ndigits);
@@ -1384,7 +1436,8 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
i = num_bits - 1;
- idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ idx = !!vli_test_bit(u1, i);
+ idx |= (!!vli_test_bit(u2, i)) << 1;
point = points[idx];
vli_set(rx, point->x, ndigits);
@@ -1394,7 +1447,8 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
for (--i; i >= 0; i--) {
ecc_point_double_jacobian(rx, ry, z, curve);
- idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ idx = !!vli_test_bit(u1, i);
+ idx |= (!!vli_test_bit(u2, i)) << 1;
point = points[idx];
if (point) {
u64 tx[ECC_MAX_DIGITS];
@@ -1414,6 +1468,12 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
}
EXPORT_SYMBOL(ecc_point_mult_shamir);
+/*
+ * This function performs checks equivalent to Appendix A.4.2 of FIPS 186-5.
+ * Whereas A.4.2 results in an integer in the interval [1, n-1], this function
+ * ensures that the integer is in the range of [2, n-3]. We are slightly
+ * stricter because of the currently used scalar multiplication algorithm.
+ */
static int __ecc_is_key_valid(const struct ecc_curve *curve,
const u64 *private_key, unsigned int ndigits)
{
@@ -1453,31 +1513,29 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
EXPORT_SYMBOL(ecc_is_key_valid);
/*
- * ECC private keys are generated using the method of extra random bits,
- * equivalent to that described in FIPS 186-4, Appendix B.4.1.
- *
- * d = (c mod(n–1)) + 1 where c is a string of random bits, 64 bits longer
- * than requested
- * 0 <= c mod(n-1) <= n-2 and implies that
- * 1 <= d <= n-1
+ * ECC private keys are generated using the method of rejection sampling,
+ * equivalent to that described in FIPS 186-5, Appendix A.2.2.
*
* This method generates a private key uniformly distributed in the range
- * [1, n-1].
+ * [2, n-3].
*/
-int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+ u64 *private_key)
{
const struct ecc_curve *curve = ecc_get_curve(curve_id);
- u64 priv[ECC_MAX_DIGITS];
unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
unsigned int nbits = vli_num_bits(curve->n, ndigits);
int err;
- /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
- if (nbits < 160 || ndigits > ARRAY_SIZE(priv))
+ /*
+ * Step 1 & 2: check that N is included in Table 1 of FIPS 186-5,
+ * section 6.1.1.
+ */
+ if (nbits < 224)
return -EINVAL;
/*
- * FIPS 186-4 recommends that the private key should be obtained from a
+ * FIPS 186-5 recommends that the private key should be obtained from a
* RBG with a security strength equal to or greater than the security
* strength associated with N.
*
@@ -1490,17 +1548,17 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
if (crypto_get_default_rng())
return -EFAULT;
- err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+ /* Step 3: obtain N returned_bits from the DRBG. */
+ err = crypto_rng_get_bytes(crypto_default_rng,
+ (u8 *)private_key, nbytes);
crypto_put_default_rng();
if (err)
return err;
- /* Make sure the private key is in the valid range. */
- if (__ecc_is_key_valid(curve, priv, ndigits))
+ /* Step 4: make sure the private key is in the valid range. */
+ if (__ecc_is_key_valid(curve, private_key, ndigits))
return -EINVAL;
- ecc_swap_digits(priv, privkey, ndigits);
-
return 0;
}
EXPORT_SYMBOL(ecc_gen_privkey);
@@ -1510,23 +1568,20 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *pk;
- u64 priv[ECC_MAX_DIGITS];
const struct ecc_curve *curve = ecc_get_curve(curve_id);
- if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) {
+ if (!private_key) {
ret = -EINVAL;
goto out;
}
- ecc_swap_digits(private_key, priv, ndigits);
-
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
goto out;
}
- ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
+ ecc_point_mult(pk, &curve->g, private_key, NULL, curve, ndigits);
/* SP800-56A rev 3 5.6.2.1.3 key check */
if (ecc_is_pubkey_valid_full(curve, pk)) {
@@ -1610,13 +1665,11 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *product, *pk;
- u64 priv[ECC_MAX_DIGITS];
u64 rand_z[ECC_MAX_DIGITS];
unsigned int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
- if (!private_key || !public_key || !curve ||
- ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) {
+ if (!private_key || !public_key || ndigits > ARRAY_SIZE(rand_z)) {
ret = -EINVAL;
goto out;
}
@@ -1637,15 +1690,13 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
if (ret)
goto err_alloc_product;
- ecc_swap_digits(private_key, priv, ndigits);
-
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
- ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
+ ecc_point_mult(product, pk, private_key, rand_z, curve, ndigits);
if (ecc_point_is_zero(product)) {
ret = -EFAULT;
@@ -1655,7 +1706,6 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_swap_digits(product->x, secret, ndigits);
err_validity:
- memzero_explicit(priv, sizeof(priv));
memzero_explicit(rand_z, sizeof(rand_z));
ecc_free_point(product);
err_alloc_product:
@@ -1665,4 +1715,5 @@ out:
}
EXPORT_SYMBOL(crypto_ecdh_shared_secret);
+MODULE_DESCRIPTION("core elliptic curve module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 9719934c9428..0ecade7d02f5 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -17,6 +17,7 @@ static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
.name = "nist_192",
+ .nbits = 192,
.g = {
.x = nist_p192_g_x,
.y = nist_p192_g_y,
@@ -43,6 +44,7 @@ static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
.name = "nist_256",
+ .nbits = 256,
.g = {
.x = nist_p256_g_x,
.y = nist_p256_g_y,
@@ -75,6 +77,7 @@ static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull,
0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull };
static struct ecc_curve nist_p384 = {
.name = "nist_384",
+ .nbits = 384,
.g = {
.x = nist_p384_g_x,
.y = nist_p384_g_y,
@@ -86,6 +89,51 @@ static struct ecc_curve nist_p384 = {
.b = nist_p384_b
};
+/* NIST P-521 */
+static u64 nist_p521_g_x[] = { 0xf97e7e31c2e5bd66ull, 0x3348b3c1856a429bull,
+ 0xfe1dc127a2ffa8deull, 0xa14b5e77efe75928ull,
+ 0xf828af606b4d3dbaull, 0x9c648139053fb521ull,
+ 0x9e3ecb662395b442ull, 0x858e06b70404e9cdull,
+ 0xc6ull };
+static u64 nist_p521_g_y[] = { 0x88be94769fd16650ull, 0x353c7086a272c240ull,
+ 0xc550b9013fad0761ull, 0x97ee72995ef42640ull,
+ 0x17afbd17273e662cull, 0x98f54449579b4468ull,
+ 0x5c8a5fb42c7d1bd9ull, 0x39296a789a3bc004ull,
+ 0x118ull };
+static u64 nist_p521_p[] = { 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0x1ffull };
+static u64 nist_p521_n[] = { 0xbb6fb71e91386409ull, 0x3bb5c9b8899c47aeull,
+ 0x7fcc0148f709a5d0ull, 0x51868783bf2f966bull,
+ 0xfffffffffffffffaull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0x1ffull };
+static u64 nist_p521_a[] = { 0xfffffffffffffffcull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0x1ffull };
+static u64 nist_p521_b[] = { 0xef451fd46b503f00ull, 0x3573df883d2c34f1ull,
+ 0x1652c0bd3bb1bf07ull, 0x56193951ec7e937bull,
+ 0xb8b489918ef109e1ull, 0xa2da725b99b315f3ull,
+ 0x929a21a0b68540eeull, 0x953eb9618e1c9a1full,
+ 0x051ull };
+static struct ecc_curve nist_p521 = {
+ .name = "nist_521",
+ .nbits = 521,
+ .g = {
+ .x = nist_p521_g_x,
+ .y = nist_p521_g_y,
+ .ndigits = 9,
+ },
+ .p = nist_p521_p,
+ .n = nist_p521_n,
+ .a = nist_p521_a,
+ .b = nist_p521_b
+};
+
/* curve25519 */
static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000 };
@@ -95,6 +143,7 @@ static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000 };
static const struct ecc_curve ecc_25519 = {
.name = "curve25519",
+ .nbits = 255,
.g = {
.x = curve25519_g_x,
.ndigits = 4,
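
A note on the constants, for orientation: nist_p521_p encodes the Mersenne prime 2^521 - 1, eight all-ones limbs capped by a ninth limb of 0x1ff, and it is exactly this shape that lets vli_mmod_fast_521() above reduce a product with one shift and a single modular addition.
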
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 80afee3234fb..9f0b93b3166d 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -28,23 +28,28 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params;
+ int ret = 0;
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL;
+ memset(ctx->private_key, 0, sizeof(ctx->private_key));
+
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
- memcpy(ctx->private_key, params.key, params.key_size);
+ ecc_digits_from_bytes(params.key, params.key_size,
+ ctx->private_key, ctx->ndigits);
if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
ctx->private_key, params.key_size) < 0) {
memzero_explicit(ctx->private_key, params.key_size);
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+
+ return ret;
}
static int ecdh_compute_value(struct kpp_request *req)
@@ -235,7 +240,7 @@ static void __exit ecdh_exit(void)
crypto_unregister_kpp(&ecdh_nist_p384);
}
-subsys_initcall(ecdh_init);
+module_init(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c
new file mode 100644
index 000000000000..e0c55c64711c
--- /dev/null
+++ b/crypto/ecdsa-p1363.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ECDSA P1363 signature encoding
+ *
+ * Copyright (c) 2024 Intel Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/sig.h>
+
+struct ecdsa_p1363_ctx {
+ struct crypto_sig *child;
+};
+
+static int ecdsa_p1363_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+ unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
+ unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64));
+ struct ecdsa_raw_sig sig;
+
+ if (slen != 2 * keylen)
+ return -EINVAL;
+
+ ecc_digits_from_bytes(src, keylen, sig.r, ndigits);
+ ecc_digits_from_bytes(src + keylen, keylen, sig.s, ndigits);
+
+ return crypto_sig_verify(ctx->child, &sig, sizeof(sig), digest, dlen);
+}
+
+static unsigned int ecdsa_p1363_key_size(struct crypto_sig *tfm)
+{
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return crypto_sig_keysize(ctx->child);
+}
+
+static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm)
+{
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
+}
+
+static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm)
+{
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return crypto_sig_digestsize(ctx->child);
+}
+
+static int ecdsa_p1363_set_pub_key(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return crypto_sig_set_pubkey(ctx->child, key, keylen);
+}
+
+static int ecdsa_p1363_init_tfm(struct crypto_sig *tfm)
+{
+ struct sig_instance *inst = sig_alg_instance(tfm);
+ struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+ struct crypto_sig *child_tfm;
+
+ child_tfm = crypto_spawn_sig(spawn);
+ if (IS_ERR(child_tfm))
+ return PTR_ERR(child_tfm);
+
+ ctx->child = child_tfm;
+
+ return 0;
+}
+
+static void ecdsa_p1363_exit_tfm(struct crypto_sig *tfm)
+{
+ struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
+
+ crypto_free_sig(ctx->child);
+}
+
+static void ecdsa_p1363_free(struct sig_instance *inst)
+{
+ struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
+
+ crypto_drop_sig(spawn);
+ kfree(inst);
+}
+
+static int ecdsa_p1363_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_sig_spawn *spawn;
+ struct sig_instance *inst;
+ struct sig_alg *ecdsa_alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = sig_instance_ctx(inst);
+
+ err = crypto_grab_sig(spawn, sig_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+
+ ecdsa_alg = crypto_spawn_sig_alg(spawn);
+
+ err = -EINVAL;
+ if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name,
+ &ecdsa_alg->base);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority;
+ inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_p1363_ctx);
+
+ inst->alg.init = ecdsa_p1363_init_tfm;
+ inst->alg.exit = ecdsa_p1363_exit_tfm;
+
+ inst->alg.verify = ecdsa_p1363_verify;
+ inst->alg.key_size = ecdsa_p1363_key_size;
+ inst->alg.max_size = ecdsa_p1363_max_size;
+ inst->alg.digest_size = ecdsa_p1363_digest_size;
+ inst->alg.set_pub_key = ecdsa_p1363_set_pub_key;
+
+ inst->free = ecdsa_p1363_free;
+
+ err = sig_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ ecdsa_p1363_free(inst);
+ }
+ return err;
+}
+
+struct crypto_template ecdsa_p1363_tmpl = {
+ .name = "p1363",
+ .create = ecdsa_p1363_create,
+ .module = THIS_MODULE,
+};
+
+MODULE_ALIAS_CRYPTO("p1363");
diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c
new file mode 100644
index 000000000000..ee71594d10a0
--- /dev/null
+++ b/crypto/ecdsa-x962.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ECDSA X9.62 signature encoding
+ *
+ * Copyright (c) 2021 IBM Corporation
+ * Copyright (c) 2024 Intel Corporation
+ */
+
+#include <linux/asn1_decoder.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/sig.h>
+
+#include "ecdsasignature.asn1.h"
+
+struct ecdsa_x962_ctx {
+ struct crypto_sig *child;
+};
+
+struct ecdsa_x962_signature_ctx {
+ struct ecdsa_raw_sig sig;
+ unsigned int ndigits;
+};
+
+/* Get the r and s components of a signature from the X.509 certificate. */
+static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen,
+ unsigned int ndigits)
+{
+ size_t bufsize = ndigits * sizeof(u64);
+ const char *d = value;
+
+ if (!value || !vlen || vlen > bufsize + 1)
+ return -EINVAL;
+
+ /*
+ * vlen may be 1 byte larger than bufsize due to a leading zero byte
+ * (necessary if the most significant bit of the integer is set).
+ */
+ if (vlen > bufsize) {
+ /* skip over leading zeros that make 'value' a positive int */
+ if (*d == 0) {
+ vlen -= 1;
+ d++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ ecc_digits_from_bytes(d, vlen, dest, ndigits);
+
+ return 0;
+}
+
+int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecdsa_x962_signature_ctx *sig_ctx = context;
+
+ return ecdsa_get_signature_rs(sig_ctx->sig.r, hdrlen, tag, value, vlen,
+ sig_ctx->ndigits);
+}
+
+int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecdsa_x962_signature_ctx *sig_ctx = context;
+
+ return ecdsa_get_signature_rs(sig_ctx->sig.s, hdrlen, tag, value, vlen,
+ sig_ctx->ndigits);
+}
+
+static int ecdsa_x962_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+ struct ecdsa_x962_signature_ctx sig_ctx;
+ int err;
+
+ sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ sizeof(u64) * BITS_PER_BYTE);
+
+ err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen);
+ if (err < 0)
+ return err;
+
+ return crypto_sig_verify(ctx->child, &sig_ctx.sig, sizeof(sig_ctx.sig),
+ digest, dlen);
+}
+
+static unsigned int ecdsa_x962_key_size(struct crypto_sig *tfm)
+{
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return crypto_sig_keysize(ctx->child);
+}
+
+static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm)
+{
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+ struct sig_alg *alg = crypto_sig_alg(ctx->child);
+ int slen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
+
+ /*
+ * Verify takes ECDSA-Sig-Value (described in RFC 5480) as input,
+ * which is actually 2 'key_size'-bit integers encoded in ASN.1.
+ * Account for the ASN.1 encoding overhead here.
+ *
+ * NIST P192/256/384 may prepend a '0' to a coordinate to indicate
+ * a positive integer. NIST P521 never needs it.
+ */
+ if (strcmp(alg->base.cra_name, "ecdsa-nist-p521") != 0)
+ slen += 1;
+
+ /* Length of encoding the x & y coordinates */
+ slen = 2 * (slen + 2);
+
+ /*
+ * If coordinate encoding takes at least 128 bytes then an
+ * additional byte for length encoding is needed.
+ */
+ return 1 + (slen >= 128) + 1 + slen;
+}
+
+static unsigned int ecdsa_x962_digest_size(struct crypto_sig *tfm)
+{
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return crypto_sig_digestsize(ctx->child);
+}
+
+static int ecdsa_x962_set_pub_key(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return crypto_sig_set_pubkey(ctx->child, key, keylen);
+}
+
+static int ecdsa_x962_init_tfm(struct crypto_sig *tfm)
+{
+ struct sig_instance *inst = sig_alg_instance(tfm);
+ struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+ struct crypto_sig *child_tfm;
+
+ child_tfm = crypto_spawn_sig(spawn);
+ if (IS_ERR(child_tfm))
+ return PTR_ERR(child_tfm);
+
+ ctx->child = child_tfm;
+
+ return 0;
+}
+
+static void ecdsa_x962_exit_tfm(struct crypto_sig *tfm)
+{
+ struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
+
+ crypto_free_sig(ctx->child);
+}
+
+static void ecdsa_x962_free(struct sig_instance *inst)
+{
+ struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
+
+ crypto_drop_sig(spawn);
+ kfree(inst);
+}
+
+static int ecdsa_x962_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_sig_spawn *spawn;
+ struct sig_instance *inst;
+ struct sig_alg *ecdsa_alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = sig_instance_ctx(inst);
+
+ err = crypto_grab_sig(spawn, sig_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+
+ ecdsa_alg = crypto_spawn_sig_alg(spawn);
+
+ err = -EINVAL;
+ if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name,
+ &ecdsa_alg->base);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority;
+ inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_x962_ctx);
+
+ inst->alg.init = ecdsa_x962_init_tfm;
+ inst->alg.exit = ecdsa_x962_exit_tfm;
+
+ inst->alg.verify = ecdsa_x962_verify;
+ inst->alg.key_size = ecdsa_x962_key_size;
+ inst->alg.max_size = ecdsa_x962_max_size;
+ inst->alg.digest_size = ecdsa_x962_digest_size;
+ inst->alg.set_pub_key = ecdsa_x962_set_pub_key;
+
+ inst->free = ecdsa_x962_free;
+
+ err = sig_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ ecdsa_x962_free(inst);
+ }
+ return err;
+}
+
+struct crypto_template ecdsa_x962_tmpl = {
+ .name = "x962",
+ .create = ecdsa_x962_create,
+ .module = THIS_MODULE,
+};
+
+MODULE_ALIAS_CRYPTO("x962");
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index fbd76498aba8..ce8e4364842f 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -4,14 +4,11 @@
*/
#include <linux/module.h>
-#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
-#include <crypto/akcipher.h>
+#include <crypto/internal/sig.h>
#include <crypto/ecdh.h>
-#include <linux/asn1_decoder.h>
-#include <linux/scatterlist.h>
-
-#include "ecdsasignature.asn1.h"
+#include <crypto/sha2.h>
+#include <crypto/sig.h>
struct ecc_ctx {
unsigned int curve_id;
@@ -23,74 +20,6 @@ struct ecc_ctx {
struct ecc_point pub_key;
};
-struct ecdsa_signature_ctx {
- const struct ecc_curve *curve;
- u64 r[ECC_MAX_DIGITS];
- u64 s[ECC_MAX_DIGITS];
-};
-
-/*
- * Get the r and s components of a signature from the X509 certificate.
- */
-static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen, unsigned int ndigits)
-{
- size_t keylen = ndigits * sizeof(u64);
- ssize_t diff = vlen - keylen;
- const char *d = value;
- u8 rs[ECC_MAX_BYTES];
-
- if (!value || !vlen)
- return -EINVAL;
-
- /* diff = 0: 'value' has exacly the right size
- * diff > 0: 'value' has too many bytes; one leading zero is allowed that
- * makes the value a positive integer; error on more
- * diff < 0: 'value' is missing leading zeros, which we add
- */
- if (diff > 0) {
- /* skip over leading zeros that make 'value' a positive int */
- if (*d == 0) {
- vlen -= 1;
- diff--;
- d++;
- }
- if (diff)
- return -EINVAL;
- }
- if (-diff >= keylen)
- return -EINVAL;
-
- if (diff) {
- /* leading zeros not given in 'value' */
- memset(rs, 0, -diff);
- }
-
- memcpy(&rs[-diff], d, vlen);
-
- ecc_swap_digits((u64 *)rs, dest, ndigits);
-
- return 0;
-}
-
-int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
-{
- struct ecdsa_signature_ctx *sig = context;
-
- return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
- sig->curve->g.ndigits);
-}
-
-int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
-{
- struct ecdsa_signature_ctx *sig = context;
-
- return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
- sig->curve->g.ndigits);
-}
-
static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
{
const struct ecc_curve *curve = ctx->curve;
@@ -122,7 +51,7 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con
/* res.x = res.x mod n (if res.x > order) */
if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
- /* faster alternative for NIST p384, p256 & p192 */
+ /* faster alternative for NIST p521, p384, p256 & p192 */
vli_sub(res.x, res.x, curve->n, ndigits);
if (!vli_cmp(res.x, r, ndigits))
@@ -134,55 +63,27 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con
/*
* Verify an ECDSA signature.
*/
-static int ecdsa_verify(struct akcipher_request *req)
+static int ecdsa_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
- size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
- struct ecdsa_signature_ctx sig_ctx = {
- .curve = ctx->curve,
- };
- u8 rawhash[ECC_MAX_BYTES];
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
+ size_t bufsize = ctx->curve->g.ndigits * sizeof(u64);
+ const struct ecdsa_raw_sig *sig = src;
u64 hash[ECC_MAX_DIGITS];
- unsigned char *buffer;
- ssize_t diff;
- int ret;
if (unlikely(!ctx->pub_key_set))
return -EINVAL;
- buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- sg_pcopy_to_buffer(req->src,
- sg_nents_for_len(req->src, req->src_len + req->dst_len),
- buffer, req->src_len + req->dst_len, 0);
-
- ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
- buffer, req->src_len);
- if (ret < 0)
- goto error;
-
- /* if the hash is shorter then we will add leading zeros to fit to ndigits */
- diff = keylen - req->dst_len;
- if (diff >= 0) {
- if (diff)
- memset(rawhash, 0, diff);
- memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
- } else if (diff < 0) {
- /* given hash is longer, we take the left-most bytes */
- memcpy(&rawhash, buffer + req->src_len, keylen);
- }
-
- ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
+ if (slen != sizeof(*sig))
+ return -EINVAL;
- ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
+ if (bufsize > dlen)
+ bufsize = dlen;
-error:
- kfree(buffer);
+ ecc_digits_from_bytes(digest, bufsize, hash, ctx->curve->g.ndigits);
- return ret;
+ return _ecdsa_verify(ctx, hash, sig->r, sig->s);
}
static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
@@ -215,35 +116,39 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
}
/*
- * Set the public key given the raw uncompressed key data from an X509
- * certificate. The key data contain the concatenated X and Y coordinates of
- * the public key.
+ * Set the public ECC key as defined by RFC5480 section 2.2 "Subject Public
+ * Key". Only the uncompressed format is supported.
*/
-static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
+static int ecdsa_set_pub_key(struct crypto_sig *tfm, const void *key,
+ unsigned int keylen)
{
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
+ unsigned int digitlen, ndigits;
const unsigned char *d = key;
- const u64 *digits = (const u64 *)&d[1];
- unsigned int ndigits;
int ret;
ret = ecdsa_ecc_ctx_reset(ctx);
if (ret < 0)
return ret;
- if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
+ if (keylen < 1 || ((keylen - 1) & 1) != 0)
return -EINVAL;
/* we only accept uncompressed format indicated by '4' */
if (d[0] != 4)
return -EINVAL;
keylen--;
- ndigits = (keylen >> 1) / sizeof(u64);
+ digitlen = keylen >> 1;
+
+ ndigits = DIV_ROUND_UP(digitlen, sizeof(u64));
if (ndigits != ctx->curve->g.ndigits)
return -EINVAL;
- ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
- ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
+ d++;
+
+ ecc_digits_from_bytes(d, digitlen, ctx->pub_key.x, ndigits);
+ ecc_digits_from_bytes(&d[digitlen], digitlen, ctx->pub_key.y, ndigits);
+
ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
ctx->pub_key_set = ret == 0;
@@ -251,31 +156,66 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig
return ret;
}
-static void ecdsa_exit_tfm(struct crypto_akcipher *tfm)
+static void ecdsa_exit_tfm(struct crypto_sig *tfm)
{
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
ecdsa_ecc_ctx_deinit(ctx);
}
-static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
+static unsigned int ecdsa_key_size(struct crypto_sig *tfm)
{
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
- return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ return ctx->curve->nbits;
+}
+
+static unsigned int ecdsa_digest_size(struct crypto_sig *tfm)
+{
+ /*
+ * ECDSA key sizes are much smaller than RSA, and thus could
+ * operate on (hashed) inputs that are larger than the key size.
+ * E.g. SHA384-hashed input used with secp256r1 based keys.
+ * Return the largest supported hash size (SHA512).
+ */
+ return SHA512_DIGEST_SIZE;
}
-static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
+static int ecdsa_nist_p521_init_tfm(struct crypto_sig *tfm)
{
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P521);
+}
+
+static struct sig_alg ecdsa_nist_p521 = {
+ .verify = ecdsa_verify,
+ .set_pub_key = ecdsa_set_pub_key,
+ .key_size = ecdsa_key_size,
+ .digest_size = ecdsa_digest_size,
+ .init = ecdsa_nist_p521_init_tfm,
+ .exit = ecdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecdsa-nist-p521",
+ .cra_driver_name = "ecdsa-nist-p521-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecc_ctx),
+ },
+};
+
+static int ecdsa_nist_p384_init_tfm(struct crypto_sig *tfm)
+{
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
}
-static struct akcipher_alg ecdsa_nist_p384 = {
+static struct sig_alg ecdsa_nist_p384 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
- .max_size = ecdsa_max_size,
+ .key_size = ecdsa_key_size,
+ .digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p384_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -287,17 +227,18 @@ static struct akcipher_alg ecdsa_nist_p384 = {
},
};
-static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
+static int ecdsa_nist_p256_init_tfm(struct crypto_sig *tfm)
{
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
}
-static struct akcipher_alg ecdsa_nist_p256 = {
+static struct sig_alg ecdsa_nist_p256 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
- .max_size = ecdsa_max_size,
+ .key_size = ecdsa_key_size,
+ .digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p256_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -309,17 +250,18 @@ static struct akcipher_alg ecdsa_nist_p256 = {
},
};
-static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm)
+static int ecdsa_nist_p192_init_tfm(struct crypto_sig *tfm)
{
- struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192);
}
-static struct akcipher_alg ecdsa_nist_p192 = {
+static struct sig_alg ecdsa_nist_p192 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
- .max_size = ecdsa_max_size,
+ .key_size = ecdsa_key_size,
+ .digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p192_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -337,40 +279,69 @@ static int __init ecdsa_init(void)
int ret;
/* NIST p192 may not be available in FIPS mode */
- ret = crypto_register_akcipher(&ecdsa_nist_p192);
+ ret = crypto_register_sig(&ecdsa_nist_p192);
ecdsa_nist_p192_registered = ret == 0;
- ret = crypto_register_akcipher(&ecdsa_nist_p256);
+ ret = crypto_register_sig(&ecdsa_nist_p256);
if (ret)
goto nist_p256_error;
- ret = crypto_register_akcipher(&ecdsa_nist_p384);
+ ret = crypto_register_sig(&ecdsa_nist_p384);
if (ret)
goto nist_p384_error;
+ ret = crypto_register_sig(&ecdsa_nist_p521);
+ if (ret)
+ goto nist_p521_error;
+
+ ret = crypto_register_template(&ecdsa_x962_tmpl);
+ if (ret)
+ goto x962_tmpl_error;
+
+ ret = crypto_register_template(&ecdsa_p1363_tmpl);
+ if (ret)
+ goto p1363_tmpl_error;
+
return 0;
+p1363_tmpl_error:
+ crypto_unregister_template(&ecdsa_x962_tmpl);
+
+x962_tmpl_error:
+ crypto_unregister_sig(&ecdsa_nist_p521);
+
+nist_p521_error:
+ crypto_unregister_sig(&ecdsa_nist_p384);
+
nist_p384_error:
- crypto_unregister_akcipher(&ecdsa_nist_p256);
+ crypto_unregister_sig(&ecdsa_nist_p256);
nist_p256_error:
if (ecdsa_nist_p192_registered)
- crypto_unregister_akcipher(&ecdsa_nist_p192);
+ crypto_unregister_sig(&ecdsa_nist_p192);
return ret;
}
static void __exit ecdsa_exit(void)
{
+ crypto_unregister_template(&ecdsa_x962_tmpl);
+ crypto_unregister_template(&ecdsa_p1363_tmpl);
+
if (ecdsa_nist_p192_registered)
- crypto_unregister_akcipher(&ecdsa_nist_p192);
- crypto_unregister_akcipher(&ecdsa_nist_p256);
- crypto_unregister_akcipher(&ecdsa_nist_p384);
+ crypto_unregister_sig(&ecdsa_nist_p192);
+ crypto_unregister_sig(&ecdsa_nist_p256);
+ crypto_unregister_sig(&ecdsa_nist_p384);
+ crypto_unregister_sig(&ecdsa_nist_p521);
}
-subsys_initcall(ecdsa_init);
+module_init(ecdsa_init);
module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
MODULE_DESCRIPTION("ECDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p192");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p384");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p521");
MODULE_ALIAS_CRYPTO("ecdsa-generic");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 69686668625e..e0a2d3209938 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req)
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
- int err;
if (req->cryptlen < ivsize)
return -EINVAL;
@@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
@@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void)
crypto_unregister_template(&echainiv_tmpl);
}
-subsys_initcall(echainiv_module_init);
+module_init(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
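
The echainiv change above (and the matching geniv.c and gcm.c hunks further down) drops the "copy via a null-skcipher encryption" idiom in favour of memcpy_sglist(), removing one crypto_sync_skcipher allocation per instance. A hedged model of what the helper provides — the real implementation in crypto/scatterwalk.c walks both lists in place; this fallback sketch only shows the equivalent semantics through a bounce buffer:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Semantics-only model of memcpy_sglist(dst, src, nbytes); the real
 * helper copies without allocating. Negative sg_nents_for_len()
 * returns are ignored here for brevity. */
static int sglist_copy_model(struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	void *buf = kmalloc(nbytes, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	sg_copy_to_buffer(src, sg_nents_for_len(src, nbytes), buf, nbytes);
	sg_copy_from_buffer(dst, sg_nents_for_len(dst, nbytes), buf, nbytes);
	kfree(buf);
	return 0;
}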
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index f3c6b5e15e75..2c0602f0cd40 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -18,12 +18,11 @@
#include <linux/module.h>
#include <linux/crypto.h>
+#include <crypto/sig.h>
#include <crypto/streebog.h>
-#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
-#include <crypto/akcipher.h>
+#include <crypto/internal/sig.h>
#include <linux/oid_registry.h>
-#include <linux/scatterlist.h>
#include "ecrdsa_params.asn1.h"
#include "ecrdsa_pub_key.asn1.h"
#include "ecrdsa_defs.h"
@@ -68,13 +67,12 @@ static const struct ecc_curve *get_curve_by_oid(enum OID oid)
}
}
-static int ecrdsa_verify(struct akcipher_request *req)
+static int ecrdsa_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- unsigned char sig[ECRDSA_MAX_SIG_SIZE];
- unsigned char digest[STREEBOG512_DIGEST_SIZE];
- unsigned int ndigits = req->dst_len / sizeof(u64);
+ struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
+ unsigned int ndigits = dlen / sizeof(u64);
u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
@@ -91,25 +89,19 @@ static int ecrdsa_verify(struct akcipher_request *req)
*/
if (!ctx->curve ||
!ctx->digest ||
- !req->src ||
+ !src ||
+ !digest ||
!ctx->pub_key.x ||
- req->dst_len != ctx->digest_len ||
- req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
+ dlen != ctx->digest_len ||
+ dlen != ctx->curve->g.ndigits * sizeof(u64) ||
ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
- req->dst_len * 2 != req->src_len ||
- WARN_ON(req->src_len > sizeof(sig)) ||
- WARN_ON(req->dst_len > sizeof(digest)))
+ dlen * 2 != slen ||
+ WARN_ON(slen > ECRDSA_MAX_SIG_SIZE) ||
+ WARN_ON(dlen > STREEBOG512_DIGEST_SIZE))
return -EBADMSG;
- sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
- sig, req->src_len);
- sg_pcopy_to_buffer(req->src,
- sg_nents_for_len(req->src,
- req->src_len + req->dst_len),
- digest, req->dst_len, req->src_len);
-
- vli_from_be64(s, sig, ndigits);
- vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
+ vli_from_be64(s, src, ndigits);
+ vli_from_be64(r, src + ndigits * sizeof(u64), ndigits);
/* Step 1: verify that 0 < r < q, 0 < s < q */
if (vli_is_zero(r, ndigits) ||
@@ -188,10 +180,10 @@ static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
}
/* Parse BER encoded subjectPublicKey. */
-static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+static int ecrdsa_set_pub_key(struct crypto_sig *tfm, const void *key,
unsigned int keylen)
{
- struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int ndigits;
u32 algo, paramlen;
u8 *params;
@@ -249,24 +241,32 @@ static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return 0;
}
-static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
+static unsigned int ecrdsa_key_size(struct crypto_sig *tfm)
{
- struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
/*
* Verify doesn't need any output, so it's just informational
* for keyctl to determine the key bit size.
*/
- return ctx->pub_key.ndigits * sizeof(u64);
+ return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE;
+}
+
+static unsigned int ecrdsa_max_size(struct crypto_sig *tfm)
+{
+ struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return 2 * ctx->pub_key.ndigits * sizeof(u64);
}
-static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
+static void ecrdsa_exit_tfm(struct crypto_sig *tfm)
{
}
-static struct akcipher_alg ecrdsa_alg = {
+static struct sig_alg ecrdsa_alg = {
.verify = ecrdsa_verify,
.set_pub_key = ecrdsa_set_pub_key,
+ .key_size = ecrdsa_key_size,
.max_size = ecrdsa_max_size,
.exit = ecrdsa_exit_tfm,
.base = {
@@ -280,12 +280,12 @@ static struct akcipher_alg ecrdsa_alg = {
static int __init ecrdsa_mod_init(void)
{
- return crypto_register_akcipher(&ecrdsa_alg);
+ return crypto_register_sig(&ecrdsa_alg);
}
static void __exit ecrdsa_mod_fini(void)
{
- crypto_unregister_akcipher(&ecrdsa_alg);
+ crypto_unregister_sig(&ecrdsa_alg);
}
module_init(ecrdsa_mod_init);
@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa");
MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h
index 0056335b9d03..1c2c2449e331 100644
--- a/crypto/ecrdsa_defs.h
+++ b/crypto/ecrdsa_defs.h
@@ -47,6 +47,7 @@ static u64 cp256a_b[] = {
static struct ecc_curve gost_cp256a = {
.name = "cp256a",
+ .nbits = 256,
.g = {
.x = cp256a_g_x,
.y = cp256a_g_y,
@@ -80,6 +81,7 @@ static u64 cp256b_b[] = {
static struct ecc_curve gost_cp256b = {
.name = "cp256b",
+ .nbits = 256,
.g = {
.x = cp256b_g_x,
.y = cp256b_g_y,
@@ -117,6 +119,7 @@ static u64 cp256c_b[] = {
static struct ecc_curve gost_cp256c = {
.name = "cp256c",
+ .nbits = 256,
.g = {
.x = cp256c_g_x,
.y = cp256c_g_y,
@@ -166,6 +169,7 @@ static u64 tc512a_b[] = {
static struct ecc_curve gost_tc512a = {
.name = "tc512a",
+ .nbits = 512,
.g = {
.x = tc512a_g_x,
.y = tc512a_g_y,
@@ -211,6 +215,7 @@ static u64 tc512b_b[] = {
static struct ecc_curve gost_tc512b = {
.name = "tc512b",
+ .nbits = 512,
.g = {
.x = tc512b_g_x,
.y = tc512b_g_y,
diff --git a/crypto/essiv.c b/crypto/essiv.c
index e33369df9034..a47a3eab6935 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -131,9 +131,9 @@ static int essiv_aead_setauthsize(struct crypto_aead *tfm,
return crypto_aead_setauthsize(tctx->u.aead, authsize);
}
-static void essiv_skcipher_done(struct crypto_async_request *areq, int err)
+static void essiv_skcipher_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
skcipher_request_complete(req, err);
}
@@ -166,12 +166,17 @@ static int essiv_skcipher_decrypt(struct skcipher_request *req)
return essiv_skcipher_crypt(req, false);
}
-static void essiv_aead_done(struct crypto_async_request *areq, int err)
+static void essiv_aead_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
+ if (err == -EINPROGRESS)
+ goto out;
+
kfree(rctx->assoc);
+
+out:
aead_request_complete(req, err);
}
@@ -181,9 +186,14 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
struct aead_request *subreq = &rctx->aead_req;
+ int ivsize = crypto_aead_ivsize(tfm);
+ int ssize = req->assoclen - ivsize;
struct scatterlist *src = req->src;
int err;
+ if (ssize < 0)
+ return -EINVAL;
+
crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
/*
@@ -193,19 +203,12 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
*/
rctx->assoc = NULL;
if (req->src == req->dst || !enc) {
- scatterwalk_map_and_copy(req->iv, req->dst,
- req->assoclen - crypto_aead_ivsize(tfm),
- crypto_aead_ivsize(tfm), 1);
+ scatterwalk_map_and_copy(req->iv, req->dst, ssize, ivsize, 1);
} else {
u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset;
- int ivsize = crypto_aead_ivsize(tfm);
- int ssize = req->assoclen - ivsize;
struct scatterlist *sg;
int nents;
- if (ssize < 0)
- return -EINVAL;
-
nents = sg_nents_for_len(req->src, ssize);
if (nents < 0)
return -EINVAL;
@@ -247,7 +250,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
err = enc ? crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
- if (rctx->assoc && err != -EINPROGRESS)
+ if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
kfree(rctx->assoc);
return err;
}
@@ -400,8 +403,7 @@ static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name)
if (len >= CRYPTO_MAX_ALG_NAME)
return false;
- memcpy(essiv_cipher_name, p, len);
- essiv_cipher_name[len] = '\0';
+ strscpy(essiv_cipher_name, p, len + 1);
return true;
}
@@ -437,6 +439,7 @@ out:
static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
+ struct skcipher_alg_common *skcipher_alg = NULL;
struct crypto_attr_type *algt;
const char *inner_cipher_name;
const char *shash_name;
@@ -445,7 +448,6 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_instance *inst;
struct crypto_alg *base, *block_base;
struct essiv_instance_ctx *ictx;
- struct skcipher_alg *skcipher_alg = NULL;
struct aead_alg *aead_alg = NULL;
struct crypto_alg *_hash_alg;
struct shash_alg *hash_alg;
@@ -470,7 +472,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
mask = crypto_algt_inherited_mask(algt);
switch (type) {
- case CRYPTO_ALG_TYPE_SKCIPHER:
+ case CRYPTO_ALG_TYPE_LSKCIPHER:
skcipher_inst = kzalloc(sizeof(*skcipher_inst) +
sizeof(*ictx), GFP_KERNEL);
if (!skcipher_inst)
@@ -484,9 +486,10 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
inner_cipher_name, 0, mask);
if (err)
goto out_free_inst;
- skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn);
+ skcipher_alg = crypto_spawn_skcipher_alg_common(
+ &ictx->u.skcipher_spawn);
block_base = &skcipher_alg->base;
- ivsize = crypto_skcipher_alg_ivsize(skcipher_alg);
+ ivsize = skcipher_alg->ivsize;
break;
case CRYPTO_ALG_TYPE_AEAD:
@@ -543,8 +546,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
}
/* record the driver name so we can instantiate this exact algo later */
- strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME);
+ strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name);
/* Instance fields */
@@ -569,18 +571,17 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
base->cra_alignmask = block_base->cra_alignmask;
base->cra_priority = block_base->cra_priority;
- if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
+ if (type == CRYPTO_ALG_TYPE_LSKCIPHER) {
skcipher_inst->alg.setkey = essiv_skcipher_setkey;
skcipher_inst->alg.encrypt = essiv_skcipher_encrypt;
skcipher_inst->alg.decrypt = essiv_skcipher_decrypt;
skcipher_inst->alg.init = essiv_skcipher_init_tfm;
skcipher_inst->alg.exit = essiv_skcipher_exit_tfm;
- skcipher_inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(skcipher_alg);
- skcipher_inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(skcipher_alg);
+ skcipher_inst->alg.min_keysize = skcipher_alg->min_keysize;
+ skcipher_inst->alg.max_keysize = skcipher_alg->max_keysize;
skcipher_inst->alg.ivsize = ivsize;
- skcipher_inst->alg.chunksize = crypto_skcipher_alg_chunksize(skcipher_alg);
- skcipher_inst->alg.walksize = crypto_skcipher_alg_walksize(skcipher_alg);
+ skcipher_inst->alg.chunksize = skcipher_alg->chunksize;
skcipher_inst->free = essiv_skcipher_free_instance;
@@ -611,7 +612,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
out_free_hash:
crypto_mod_put(_hash_alg);
out_drop_skcipher:
- if (type == CRYPTO_ALG_TYPE_SKCIPHER)
+ if (type == CRYPTO_ALG_TYPE_LSKCIPHER)
crypto_drop_skcipher(&ictx->u.skcipher_spawn);
else
crypto_drop_aead(&ictx->u.aead_spawn);
@@ -638,10 +639,10 @@ static void __exit essiv_module_exit(void)
crypto_unregister_template(&essiv_tmpl);
}
-subsys_initcall(essiv_module_init);
+module_init(essiv_module_init);
module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("essiv");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
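
In parse_cipher_name() the memcpy()-plus-manual-terminator pair becomes a single strscpy() bounded by len + 1. That bound matters: p points into a longer cra_name and p[len] is a ',' or ')' rather than a NUL, so strscpy() copies exactly len bytes and then writes the terminator itself. Worked example, assuming the usual essiv instance naming: for cra_name = "essiv(cbc(aes),sha256)", p points at "cbc(aes),sha256)" and len = 8, so essiv_cipher_name ends up as "cbc(aes)\0".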
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 76a04d000c0d..80036835cec5 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -43,10 +43,10 @@
*/
#include <asm/byteorder.h>
+#include <crypto/algapi.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#define ROUNDS 16
@@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void)
crypto_unregister_alg(&fcrypt_alg);
}
-subsys_initcall(fcrypt_mod_init);
+module_init(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index b05d3c7b3ca5..65d2bc070a26 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
+#include <linux/string_choices.h>
#include <generated/utsrelease.h>
int fips_enabled;
@@ -23,9 +24,11 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
static int fips_enable(char *str)
{
- fips_enabled = !!simple_strtol(str, NULL, 0);
- printk(KERN_INFO "fips mode: %s\n",
- fips_enabled ? "enabled" : "disabled");
+ if (kstrtoint(str, 0, &fips_enabled))
+ return 0;
+
+ fips_enabled = !!fips_enabled;
+ pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled));
return 1;
}
@@ -41,7 +44,7 @@ __setup("fips=", fips_enable);
static char fips_name[] = FIPS_MODULE_NAME;
static char fips_version[] = FIPS_MODULE_VERSION;
-static struct ctl_table crypto_sysctl_table[] = {
+static const struct ctl_table crypto_sysctl_table[] = {
{
.procname = "fips_enabled",
.data = &fips_enabled,
@@ -63,23 +66,13 @@ static struct ctl_table crypto_sysctl_table[] = {
.mode = 0444,
.proc_handler = proc_dostring
},
- {}
-};
-
-static struct ctl_table crypto_dir_table[] = {
- {
- .procname = "crypto",
- .mode = 0555,
- .child = crypto_sysctl_table
- },
- {}
};
static struct ctl_table_header *crypto_sysctls;
static void crypto_proc_fips_init(void)
{
- crypto_sysctls = register_sysctl_table(crypto_dir_table);
+ crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table);
}
static void crypto_proc_fips_exit(void)
@@ -105,5 +98,5 @@ static void __exit fips_exit(void)
crypto_proc_fips_exit();
}
-subsys_initcall(fips_init);
+module_init(fips_init);
module_exit(fips_exit);
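
The reworked fips_enable() is stricter than the simple_strtol() version: kstrtoint() rejects anything that is not a clean integer (a trailing junk byte such as "fips=1x" previously parsed as 1), in which case the handler returns 0 and the option is treated as unhandled, leaving fips_enabled at its default. Valid integers are folded to a boolean by the double negation, so "fips=1" and "fips=2" both print "fips mode: enabled" via str_enabled_disabled(), and "fips=0" prints "fips mode: disabled".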
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 338ee0769747..97716482bed0 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -9,7 +9,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
@@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_sync_skcipher *null;
u8 nonce[4];
};
@@ -79,8 +77,6 @@ static struct {
struct scatterlist sg;
} *gcm_zeroes;
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
-
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -197,7 +193,7 @@ static inline unsigned int gcm_remain(unsigned int len)
return len ? 16 - len : 0;
}
-static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
+static void gcm_hash_len_done(void *data, int err);
static int gcm_hash_update(struct aead_request *req,
crypto_completion_t compl,
@@ -246,9 +242,9 @@ static int gcm_hash_len_continue(struct aead_request *req, u32 flags)
return gctx->complete(req, flags);
}
-static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_len_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -267,10 +263,9 @@ static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags)
gcm_hash_len_continue(req, flags);
}
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_crypt_remain_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -298,9 +293,9 @@ static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags)
return gcm_hash_crypt_remain_continue(req, flags);
}
-static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_crypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -326,10 +321,9 @@ static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags)
return gcm_hash_crypt_remain_continue(req, flags);
}
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_assoc_remain_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -355,9 +349,9 @@ static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags)
return gcm_hash_assoc_remain_continue(req, flags);
}
-static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_assoc_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -380,9 +374,9 @@ static int gcm_hash_init_continue(struct aead_request *req, u32 flags)
return gcm_hash_assoc_remain_continue(req, flags);
}
-static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_init_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -433,9 +427,9 @@ static int gcm_encrypt_continue(struct aead_request *req, u32 flags)
return gcm_hash(req, flags);
}
-static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_encrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -477,9 +471,9 @@ static int crypto_gcm_verify(struct aead_request *req)
return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
-static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_decrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (!err)
err = crypto_gcm_verify(req);
@@ -578,10 +572,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *ghash_name)
{
+ struct skcipher_alg_common *ctr;
u32 mask;
struct aead_instance *inst;
struct gcm_instance_ctx *ctx;
- struct skcipher_alg *ctr;
struct hash_alg_common *ghash;
int err;
@@ -609,13 +603,12 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ctr_name, 0, mask);
if (err)
goto err_free_inst;
- ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
+ ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
- crypto_skcipher_alg_ivsize(ctr) != 16 ||
- ctr->base.cra_blocksize != 1)
+ ctr->ivsize != 16 || ctr->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
@@ -632,11 +625,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_priority = (ghash->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
- ctr->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
inst->alg.ivsize = GCM_AES_IV_SIZE;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
+ inst->alg.chunksize = ctr->chunksize;
inst->alg.maxauthsize = 16;
inst->alg.init = crypto_gcm_init_tfm;
inst->alg.exit = crypto_gcm_exit_tfm;
@@ -934,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
- int err;
if (req->src != req->dst) {
- err = crypto_rfc4543_copy_src_to_dst(req, enc);
- if (err)
- return err;
+ unsigned int nbytes = req->assoclen + req->cryptlen -
+ (enc ? 0 : authsize);
+
+ memcpy_sglist(req->dst, req->src, nbytes);
}
memcpy(iv, ctx->nonce, 4);
@@ -956,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
- unsigned int authsize = crypto_aead_authsize(aead);
- unsigned int nbytes = req->assoclen + req->cryptlen -
- (enc ? 0 : authsize);
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
-
- skcipher_request_set_sync_tfm(nreq, ctx->null);
- skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
-
- return crypto_skcipher_encrypt(nreq);
-}
-
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
@@ -991,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_sync_skcipher *null;
unsigned long align;
- int err = 0;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_aead;
-
ctx->child = aead;
- ctx->null = null;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
@@ -1016,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
align + GCM_AES_IV_SIZE);
return 0;
-
-err_free_aead:
- crypto_free_aead(aead);
- return err;
}
static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
@@ -1027,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1156,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void)
ARRAY_SIZE(crypto_gcm_tmpls));
}
-subsys_initcall(crypto_gcm_module_init);
+module_init(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/geniv.c b/crypto/geniv.c
index bee4621b4f12..42eff6a7387c 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -9,7 +9,6 @@
#include <crypto/internal/geniv.h>
#include <crypto/internal/rng.h>
-#include <crypto/null.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher();
- err = PTR_ERR(ctx->sknull);
- if (IS_ERR(ctx->sknull))
- goto out;
-
child = crypto_spawn_aead(aead_instance_ctx(inst));
err = PTR_ERR(child);
if (IS_ERR(child))
- goto drop_null;
+ goto out;
ctx->child = child;
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
@@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead)
out:
return err;
-
-drop_null:
- crypto_put_default_null_skcipher();
- goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
deleted file mode 100644
index a69ae3e6c16c..000000000000
--- a/crypto/gf128mul.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/* gf128mul.c - GF(2^128) multiplication functions
- *
- * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
- * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
- *
- * Based on Dr Brian Gladman's (GPL'd) work published at
- * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
- * See the original copyright notice below.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-/*
- ---------------------------------------------------------------------------
- Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
-
- LICENSE TERMS
-
- The free distribution and use of this software in both source and binary
- form is allowed (with or without changes) provided that:
-
- 1. distributions of this source code include the above copyright
- notice, this list of conditions and the following disclaimer;
-
- 2. distributions in binary form include the above copyright
- notice, this list of conditions and the following disclaimer
- in the documentation and/or other associated materials;
-
- 3. the copyright holder's name is not used to endorse products
- built using this software without specific written permission.
-
- ALTERNATIVELY, provided that this notice is retained in full, this product
- may be distributed under the terms of the GNU General Public License (GPL),
- in which case the provisions of the GPL apply INSTEAD OF those given above.
-
- DISCLAIMER
-
- This software is provided 'as is' with no explicit or implied warranties
- in respect of its properties, including, but not limited to, correctness
- and/or fitness for purpose.
- ---------------------------------------------------------------------------
- Issue 31/01/2006
-
- This file provides fast multiplication in GF(2^128) as required by several
- cryptographic authentication modes
-*/
-
-#include <crypto/gf128mul.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#define gf128mul_dat(q) { \
- q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
- q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
- q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
- q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
- q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
- q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
- q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
- q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
- q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
- q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
- q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
- q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
- q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
- q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
- q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
- q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
- q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
- q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
- q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
- q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
- q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
- q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
- q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
- q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
- q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
- q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
- q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
- q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
- q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
- q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
- q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
- q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
-}
-
-/*
- * Given a value i in 0..255 as the byte overflow when a field element
- * in GF(2^128) is multiplied by x^8, the following macro returns the
- * 16-bit value that must be XOR-ed into the low-degree end of the
- * product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
- *
- * There are two versions of the macro, and hence two tables: one for
- * the "be" convention where the highest-order bit is the coefficient of
- * the highest-degree polynomial term, and one for the "le" convention
- * where the highest-order bit is the coefficient of the lowest-degree
- * polynomial term. In both cases the values are stored in CPU byte
- * endianness such that the coefficients are ordered consistently across
- * bytes, i.e. in the "be" table bits 15..0 of the stored value
- * correspond to the coefficients of x^15..x^0, and in the "le" table
- * bits 15..0 correspond to the coefficients of x^0..x^15.
- *
- * Therefore, provided that the appropriate byte endianness conversions
- * are done by the multiplication functions (and these must be in place
- * anyway to support both little endian and big endian CPUs), the "be"
- * table can be used for multiplications of both "bbe" and "ble"
- * elements, and the "le" table can be used for multiplications of both
- * "lle" and "lbe" elements.
- */
-
-#define xda_be(i) ( \
- (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
- (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
- (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
- (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
-)
-
-#define xda_le(i) ( \
- (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
- (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
- (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
- (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
-)
-
-static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
-static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
-
-/*
- * The following functions multiply a field element by x^8 in
- * the polynomial field representation. They use 64-bit word operations
- * to gain speed but compensate for machine endianness and hence work
- * correctly on both styles of machine.
- */
-
-static void gf128mul_x8_lle(be128 *x)
-{
- u64 a = be64_to_cpu(x->a);
- u64 b = be64_to_cpu(x->b);
- u64 _tt = gf128mul_table_le[b & 0xff];
-
- x->b = cpu_to_be64((b >> 8) | (a << 56));
- x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
-}
-
-static void gf128mul_x8_bbe(be128 *x)
-{
- u64 a = be64_to_cpu(x->a);
- u64 b = be64_to_cpu(x->b);
- u64 _tt = gf128mul_table_be[a >> 56];
-
- x->a = cpu_to_be64((a << 8) | (b >> 56));
- x->b = cpu_to_be64((b << 8) ^ _tt);
-}
-
-void gf128mul_x8_ble(le128 *r, const le128 *x)
-{
- u64 a = le64_to_cpu(x->a);
- u64 b = le64_to_cpu(x->b);
- u64 _tt = gf128mul_table_be[a >> 56];
-
- r->a = cpu_to_le64((a << 8) | (b >> 56));
- r->b = cpu_to_le64((b << 8) ^ _tt);
-}
-EXPORT_SYMBOL(gf128mul_x8_ble);
-
-void gf128mul_lle(be128 *r, const be128 *b)
-{
- be128 p[8];
- int i;
-
- p[0] = *r;
- for (i = 0; i < 7; ++i)
- gf128mul_x_lle(&p[i + 1], &p[i]);
-
- memset(r, 0, sizeof(*r));
- for (i = 0;;) {
- u8 ch = ((u8 *)b)[15 - i];
-
- if (ch & 0x80)
- be128_xor(r, r, &p[0]);
- if (ch & 0x40)
- be128_xor(r, r, &p[1]);
- if (ch & 0x20)
- be128_xor(r, r, &p[2]);
- if (ch & 0x10)
- be128_xor(r, r, &p[3]);
- if (ch & 0x08)
- be128_xor(r, r, &p[4]);
- if (ch & 0x04)
- be128_xor(r, r, &p[5]);
- if (ch & 0x02)
- be128_xor(r, r, &p[6]);
- if (ch & 0x01)
- be128_xor(r, r, &p[7]);
-
- if (++i >= 16)
- break;
-
- gf128mul_x8_lle(r);
- }
-}
-EXPORT_SYMBOL(gf128mul_lle);
-
-void gf128mul_bbe(be128 *r, const be128 *b)
-{
- be128 p[8];
- int i;
-
- p[0] = *r;
- for (i = 0; i < 7; ++i)
- gf128mul_x_bbe(&p[i + 1], &p[i]);
-
- memset(r, 0, sizeof(*r));
- for (i = 0;;) {
- u8 ch = ((u8 *)b)[i];
-
- if (ch & 0x80)
- be128_xor(r, r, &p[7]);
- if (ch & 0x40)
- be128_xor(r, r, &p[6]);
- if (ch & 0x20)
- be128_xor(r, r, &p[5]);
- if (ch & 0x10)
- be128_xor(r, r, &p[4]);
- if (ch & 0x08)
- be128_xor(r, r, &p[3]);
- if (ch & 0x04)
- be128_xor(r, r, &p[2]);
- if (ch & 0x02)
- be128_xor(r, r, &p[1]);
- if (ch & 0x01)
- be128_xor(r, r, &p[0]);
-
- if (++i >= 16)
- break;
-
- gf128mul_x8_bbe(r);
- }
-}
-EXPORT_SYMBOL(gf128mul_bbe);
-
-/* This version uses 64k bytes of table space.
- A 16 byte buffer has to be multiplied by a 16 byte key
- value in GF(2^128). If we consider a GF(2^128) value in
- the buffer's lowest byte, we can construct a table of
- the 256 16 byte values that result from the 256 values
- of this byte. This requires 4096 bytes. But we also
- need tables for each of the 16 higher bytes in the
- buffer as well, which makes 64 kbytes in total.
-*/
-/* additional explanation
- * t[0][BYTE] contains g*BYTE
- * t[1][BYTE] contains g*x^8*BYTE
- * ..
- * t[15][BYTE] contains g*x^120*BYTE */
-struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
-{
- struct gf128mul_64k *t;
- int i, j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- for (i = 0; i < 16; i++) {
- t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
- if (!t->t[i]) {
- gf128mul_free_64k(t);
- t = NULL;
- goto out;
- }
- }
-
- t->t[0]->t[1] = *g;
- for (j = 1; j <= 64; j <<= 1)
- gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
-
- for (i = 0;;) {
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[i]->t[j + k],
- &t->t[i]->t[j], &t->t[i]->t[k]);
-
- if (++i >= 16)
- break;
-
- for (j = 128; j > 0; j >>= 1) {
- t->t[i]->t[j] = t->t[i - 1]->t[j];
- gf128mul_x8_bbe(&t->t[i]->t[j]);
- }
- }
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_64k_bbe);
-
-void gf128mul_free_64k(struct gf128mul_64k *t)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- kfree_sensitive(t->t[i]);
- kfree_sensitive(t);
-}
-EXPORT_SYMBOL(gf128mul_free_64k);
-
-void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i;
-
- *r = t->t[0]->t[ap[15]];
- for (i = 1; i < 16; ++i)
- be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_64k_bbe);
-
-/* This version uses 4k bytes of table space.
- A 16 byte buffer has to be multiplied by a 16 byte key
- value in GF(2^128). If we consider a GF(2^128) value in a
- single byte, we can construct a table of the 256 16 byte
- values that result from the 256 values of this byte.
- This requires 4096 bytes. If we take the highest byte in
- the buffer and use this table to get the result, we then
- have to multiply by x^120 to get the final value. For the
- next highest byte the result has to be multiplied by x^112
- and so on. But we can do this by accumulating the result
- in an accumulator starting with the result for the top
- byte. We repeatedly multiply the accumulator value by
- x^8 and then add in (i.e. xor) the 16 bytes of the next
- lower byte in the buffer, stopping when we reach the
- lowest byte. This requires a 4096 byte table.
-*/
-struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
-{
- struct gf128mul_4k *t;
- int j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- t->t[128] = *g;
- for (j = 64; j > 0; j >>= 1)
- gf128mul_x_lle(&t->t[j], &t->t[j+j]);
-
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_4k_lle);
-
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
-{
- struct gf128mul_4k *t;
- int j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- t->t[1] = *g;
- for (j = 1; j <= 64; j <<= 1)
- gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
-
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_4k_bbe);
-
-void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i = 15;
-
- *r = t->t[ap[15]];
- while (i--) {
- gf128mul_x8_lle(r);
- be128_xor(r, r, &t->t[ap[i]]);
- }
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_4k_lle);
-
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i = 0;
-
- *r = t->t[ap[0]];
- while (++i < 16) {
- gf128mul_x8_bbe(r);
- be128_xor(r, r, &t->t[ap[i]]);
- }
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_4k_bbe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index c70d163c1ac9..e5803c249c12 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,14 +34,14 @@
* (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
*/
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/utils.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
static int ghash_init(struct shash_desc *desc)
{
@@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- int n = min(srclen, dctx->bytes);
- u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!dctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= GHASH_BLOCK_SIZE) {
+ do {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
- }
-
- if (srclen) {
- dctx->bytes = GHASH_BLOCK_SIZE - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
+ } while (srclen >= GHASH_BLOCK_SIZE);
- return 0;
+ return srclen;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static void ghash_flush(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
{
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- while (dctx->bytes--)
- *tmp++ ^= 0;
-
+ if (len) {
+ crypto_xor(dst, src, len);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
-
- dctx->bytes = 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- ghash_flush(ctx, dctx);
+ ghash_flush(desc, src, len);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
@@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-subsys_initcall(ghash_mod_init);
+module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/hash.h b/crypto/hash.h
new file mode 100644
index 000000000000..cf9aee07f77d
--- /dev/null
+++ b/crypto/hash.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_HASH_H
+#define _LOCAL_CRYPTO_HASH_H
+
+#include <crypto/internal/hash.h>
+
+#include "internal.h"
+
+extern const struct crypto_type crypto_shash_type;
+
+int hash_prepare_alg(struct hash_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_HASH_H */
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
deleted file mode 100644
index a49ff96bde77..000000000000
--- a/crypto/hash_info.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Hash Info: Hash algorithms information
- *
- * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
- */
-
-#include <linux/export.h>
-#include <crypto/hash_info.h>
-
-const char *const hash_algo_name[HASH_ALGO__LAST] = {
- [HASH_ALGO_MD4] = "md4",
- [HASH_ALGO_MD5] = "md5",
- [HASH_ALGO_SHA1] = "sha1",
- [HASH_ALGO_RIPE_MD_160] = "rmd160",
- [HASH_ALGO_SHA256] = "sha256",
- [HASH_ALGO_SHA384] = "sha384",
- [HASH_ALGO_SHA512] = "sha512",
- [HASH_ALGO_SHA224] = "sha224",
- [HASH_ALGO_RIPE_MD_128] = "rmd128",
- [HASH_ALGO_RIPE_MD_256] = "rmd256",
- [HASH_ALGO_RIPE_MD_320] = "rmd320",
- [HASH_ALGO_WP_256] = "wp256",
- [HASH_ALGO_WP_384] = "wp384",
- [HASH_ALGO_WP_512] = "wp512",
- [HASH_ALGO_TGR_128] = "tgr128",
- [HASH_ALGO_TGR_160] = "tgr160",
- [HASH_ALGO_TGR_192] = "tgr192",
- [HASH_ALGO_SM3_256] = "sm3",
- [HASH_ALGO_STREEBOG_256] = "streebog256",
- [HASH_ALGO_STREEBOG_512] = "streebog512",
-};
-EXPORT_SYMBOL_GPL(hash_algo_name);
-
-const int hash_digest_size[HASH_ALGO__LAST] = {
- [HASH_ALGO_MD4] = MD5_DIGEST_SIZE,
- [HASH_ALGO_MD5] = MD5_DIGEST_SIZE,
- [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE,
- [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE,
- [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE,
- [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE,
- [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE,
- [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE,
- [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE,
- [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE,
- [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE,
- [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
- [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
- [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
- [HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
- [HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE,
- [HASH_ALGO_STREEBOG_512] = STREEBOG512_DIGEST_SIZE,
-};
-EXPORT_SYMBOL_GPL(hash_digest_size);
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index 7d00a3bcb667..f4cd6c29b4d3 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -17,7 +17,6 @@
*/
#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/polyval.h>
#include <crypto/scatterwalk.h>
@@ -37,23 +36,14 @@
struct hctr2_instance_ctx {
struct crypto_cipher_spawn blockcipher_spawn;
struct crypto_skcipher_spawn xctr_spawn;
- struct crypto_shash_spawn polyval_spawn;
};
struct hctr2_tfm_ctx {
struct crypto_cipher *blockcipher;
struct crypto_skcipher *xctr;
- struct crypto_shash *polyval;
+ struct polyval_key poly_key;
+ struct polyval_elem hashed_tweaklens[2];
u8 L[BLOCKCIPHER_BLOCK_SIZE];
- int hashed_tweak_offset;
- /*
- * This struct is allocated with extra space for two exported hash
- * states. Since the hash state size is not known at compile-time, we
- * can't add these to the struct directly.
- *
- * hashed_tweaklen_divisible;
- * hashed_tweaklen_remainder;
- */
};
struct hctr2_request_ctx {
@@ -63,39 +53,17 @@ struct hctr2_request_ctx {
struct scatterlist *bulk_part_src;
struct scatterlist sg_src[2];
struct scatterlist sg_dst[2];
+ struct polyval_elem hashed_tweak;
/*
- * Sub-request sizes are unknown at compile-time, so they need to go
- * after the members with known sizes.
+ * skcipher sub-request size is unknown at compile-time, so it needs to
+ * go after the members with known sizes.
*/
union {
- struct shash_desc hash_desc;
+ struct polyval_ctx poly_ctx;
struct skcipher_request xctr_req;
} u;
- /*
- * This struct is allocated with extra space for one exported hash
- * state. Since the hash state size is not known at compile-time, we
- * can't add it to the struct directly.
- *
- * hashed_tweak;
- */
};
-static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx,
- bool has_remainder)
-{
- u8 *p = (u8 *)tctx + sizeof(*tctx);
-
- if (has_remainder) /* For messages not a multiple of block length */
- p += crypto_shash_statesize(tctx->polyval);
- return p;
-}
-
-static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
- struct hctr2_request_ctx *rctx)
-{
- return (u8 *)rctx + tctx->hashed_tweak_offset;
-}
-
/*
* The input data for each HCTR2 hash step begins with a 16-byte block that
* contains the tweak length and a flag that indicates whether the input is evenly
@@ -106,24 +74,23 @@ static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
*
* These precomputed hashes are stored in hctr2_tfm_ctx.
*/
-static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder)
+static void hctr2_hash_tweaklens(struct hctr2_tfm_ctx *tctx)
{
- SHASH_DESC_ON_STACK(shash, tfm->polyval);
- __le64 tweak_length_block[2];
- int err;
-
- shash->tfm = tctx->polyval;
- memset(tweak_length_block, 0, sizeof(tweak_length_block));
-
- tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder);
- err = crypto_shash_init(shash);
- if (err)
- return err;
- err = crypto_shash_update(shash, (u8 *)tweak_length_block,
- POLYVAL_BLOCK_SIZE);
- if (err)
- return err;
- return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder));
+ struct polyval_ctx ctx;
+
+ for (int has_remainder = 0; has_remainder < 2; has_remainder++) {
+ const __le64 tweak_length_block[2] = {
+ cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder),
+ };
+
+ polyval_init(&ctx, &tctx->poly_key);
+ polyval_update(&ctx, (const u8 *)&tweak_length_block,
+ sizeof(tweak_length_block));
+ static_assert(sizeof(tweak_length_block) == POLYVAL_BLOCK_SIZE);
+ polyval_export_blkaligned(
+ &ctx, &tctx->hashed_tweaklens[has_remainder]);
+ }
+ memzero_explicit(&ctx, sizeof(ctx));
}
static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -156,51 +123,42 @@ static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
tctx->L[0] = 0x01;
crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L);
- crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK);
- crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE);
- if (err)
- return err;
+ static_assert(sizeof(hbar) == POLYVAL_BLOCK_SIZE);
+ polyval_preparekey(&tctx->poly_key, hbar);
memzero_explicit(hbar, sizeof(hbar));
- return hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false);
+ hctr2_hash_tweaklens(tctx);
+ return 0;
}
-static int hctr2_hash_tweak(struct skcipher_request *req)
+static void hctr2_hash_tweak(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
- int err;
+ struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx;
bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE;
- hash_desc->tfm = tctx->polyval;
- err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder));
- if (err)
- return err;
- err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE);
- if (err)
- return err;
+ polyval_import_blkaligned(poly_ctx, &tctx->poly_key,
+ &tctx->hashed_tweaklens[has_remainder]);
+ polyval_update(poly_ctx, req->iv, TWEAK_SIZE);
// Store the hashed tweak, since we need it when computing both
// H(T || N) and H(T || V).
- return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx));
+ static_assert(TWEAK_SIZE % POLYVAL_BLOCK_SIZE == 0);
+ polyval_export_blkaligned(poly_ctx, &rctx->hashed_tweak);
}
-static int hctr2_hash_message(struct skcipher_request *req,
- struct scatterlist *sgl,
- u8 digest[POLYVAL_DIGEST_SIZE])
+static void hctr2_hash_message(struct skcipher_request *req,
+ struct scatterlist *sgl,
+ u8 digest[POLYVAL_DIGEST_SIZE])
{
- static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 };
+ static const u8 padding = 0x1;
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
+ struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx;
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct sg_mapping_iter miter;
- unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE;
int i;
- int err = 0;
int n = 0;
sg_miter_start(&miter, sgl, sg_nents(sgl),
@@ -208,22 +166,13 @@ static int hctr2_hash_message(struct skcipher_request *req,
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
- err = crypto_shash_update(hash_desc, miter.addr, n);
- if (err)
- break;
+ polyval_update(poly_ctx, miter.addr, n);
}
sg_miter_stop(&miter);
- if (err)
- return err;
-
- if (remainder) {
- err = crypto_shash_update(hash_desc, padding,
- BLOCKCIPHER_BLOCK_SIZE - remainder);
- if (err)
- return err;
- }
- return crypto_shash_final(hash_desc, digest);
+ if (req->cryptlen % BLOCKCIPHER_BLOCK_SIZE)
+ polyval_update(poly_ctx, &padding, 1);
+ polyval_final(poly_ctx, digest);
}
static int hctr2_finish(struct skcipher_request *req)
@@ -231,19 +180,14 @@ static int hctr2_finish(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
+ struct polyval_ctx *poly_ctx = &rctx->u.poly_ctx;
u8 digest[POLYVAL_DIGEST_SIZE];
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
- int err;
// U = UU ^ H(T || V)
// or M = MM ^ H(T || N)
- hash_desc->tfm = tctx->polyval;
- err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx));
- if (err)
- return err;
- err = hctr2_hash_message(req, rctx->bulk_part_dst, digest);
- if (err)
- return err;
+ polyval_import_blkaligned(poly_ctx, &tctx->poly_key,
+ &rctx->hashed_tweak);
+ hctr2_hash_message(req, rctx->bulk_part_dst, digest);
crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE);
// Copy U (or M) into dst scatterlist
@@ -252,10 +196,9 @@ static int hctr2_finish(struct skcipher_request *req)
return 0;
}
-static void hctr2_xctr_done(struct crypto_async_request *areq,
- int err)
+static void hctr2_xctr_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err)
err = hctr2_finish(req);
@@ -270,7 +213,6 @@ static int hctr2_crypt(struct skcipher_request *req, bool enc)
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
u8 digest[POLYVAL_DIGEST_SIZE];
int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
- int err;
// Requests must be at least one block
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
@@ -288,12 +230,8 @@ static int hctr2_crypt(struct skcipher_request *req, bool enc)
// MM = M ^ H(T || N)
// or UU = U ^ H(T || V)
- err = hctr2_hash_tweak(req);
- if (err)
- return err;
- err = hctr2_hash_message(req, rctx->bulk_part_src, digest);
- if (err)
- return err;
+ hctr2_hash_tweak(req);
+ hctr2_hash_message(req, rctx->bulk_part_src, digest);
crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
// UU = E(MM)
@@ -339,8 +277,6 @@ static int hctr2_init_tfm(struct crypto_skcipher *tfm)
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *xctr;
struct crypto_cipher *blockcipher;
- struct crypto_shash *polyval;
- unsigned int subreq_size;
int err;
xctr = crypto_spawn_skcipher(&ictx->xctr_spawn);
@@ -353,31 +289,17 @@ static int hctr2_init_tfm(struct crypto_skcipher *tfm)
goto err_free_xctr;
}
- polyval = crypto_spawn_shash(&ictx->polyval_spawn);
- if (IS_ERR(polyval)) {
- err = PTR_ERR(polyval);
- goto err_free_blockcipher;
- }
-
tctx->xctr = xctr;
tctx->blockcipher = blockcipher;
- tctx->polyval = polyval;
BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) !=
sizeof(struct hctr2_request_ctx));
- subreq_size = max(sizeof_field(struct hctr2_request_ctx, u.hash_desc) +
- crypto_shash_descsize(polyval),
- sizeof_field(struct hctr2_request_ctx, u.xctr_req) +
- crypto_skcipher_reqsize(xctr));
-
- tctx->hashed_tweak_offset = offsetof(struct hctr2_request_ctx, u) +
- subreq_size;
- crypto_skcipher_set_reqsize(tfm, tctx->hashed_tweak_offset +
- crypto_shash_statesize(polyval));
+ crypto_skcipher_set_reqsize(
+ tfm, max(sizeof(struct hctr2_request_ctx),
+ offsetofend(struct hctr2_request_ctx, u.xctr_req) +
+ crypto_skcipher_reqsize(xctr)));
return 0;
-err_free_blockcipher:
- crypto_free_cipher(blockcipher);
err_free_xctr:
crypto_free_skcipher(xctr);
return err;
@@ -389,7 +311,6 @@ static void hctr2_exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(tctx->blockcipher);
crypto_free_skcipher(tctx->xctr);
- crypto_free_shash(tctx->polyval);
}
static void hctr2_free_instance(struct skcipher_instance *inst)
@@ -398,21 +319,17 @@ static void hctr2_free_instance(struct skcipher_instance *inst)
crypto_drop_cipher(&ictx->blockcipher_spawn);
crypto_drop_skcipher(&ictx->xctr_spawn);
- crypto_drop_shash(&ictx->polyval_spawn);
kfree(inst);
}
-static int hctr2_create_common(struct crypto_template *tmpl,
- struct rtattr **tb,
- const char *xctr_name,
- const char *polyval_name)
+static int hctr2_create_common(struct crypto_template *tmpl, struct rtattr **tb,
+ const char *xctr_name)
{
+ struct skcipher_alg_common *xctr_alg;
u32 mask;
struct skcipher_instance *inst;
struct hctr2_instance_ctx *ictx;
- struct skcipher_alg *xctr_alg;
struct crypto_alg *blockcipher_alg;
- struct shash_alg *polyval_alg;
char blockcipher_name[CRYPTO_MAX_ALG_NAME];
int len;
int err;
@@ -432,7 +349,7 @@ static int hctr2_create_common(struct crypto_template *tmpl,
xctr_name, 0, mask);
if (err)
goto err_free_inst;
- xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn);
+ xctr_alg = crypto_spawn_skcipher_alg_common(&ictx->xctr_spawn);
err = -EINVAL;
if (strncmp(xctr_alg->base.cra_name, "xctr(", 5))
@@ -458,19 +375,6 @@ static int hctr2_create_common(struct crypto_template *tmpl,
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
goto err_free_inst;
- /* Polyval ε-∆U hash function */
- err = crypto_grab_shash(&ictx->polyval_spawn,
- skcipher_crypto_instance(inst),
- polyval_name, 0, mask);
- if (err)
- goto err_free_inst;
- polyval_alg = crypto_spawn_shash_alg(&ictx->polyval_spawn);
-
- /* Ensure Polyval is being used */
- err = -EINVAL;
- if (strcmp(polyval_alg->base.cra_name, "polyval") != 0)
- goto err_free_inst;
-
/* Instance fields */
err = -ENAMETOOLONG;
@@ -478,31 +382,24 @@ static int hctr2_create_common(struct crypto_template *tmpl,
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "hctr2_base(%s,%s)",
- xctr_alg->base.cra_driver_name,
- polyval_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "hctr2_base(%s,polyval-lib)",
+ xctr_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
- inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) +
- polyval_alg->statesize * 2;
- inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask |
- polyval_alg->base.cra_alignmask;
- /*
- * The hash function is called twice, so it is weighted higher than the
- * xctr and blockcipher.
- */
+ inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx);
+ inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask;
inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority +
- 4 * polyval_alg->base.cra_priority +
- blockcipher_alg->cra_priority) / 7;
+ blockcipher_alg->cra_priority) /
+ 3;
inst->alg.setkey = hctr2_setkey;
inst->alg.encrypt = hctr2_encrypt;
inst->alg.decrypt = hctr2_decrypt;
inst->alg.init = hctr2_init_tfm;
inst->alg.exit = hctr2_exit_tfm;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg);
+ inst->alg.min_keysize = xctr_alg->min_keysize;
+ inst->alg.max_keysize = xctr_alg->max_keysize;
inst->alg.ivsize = TWEAK_SIZE;
inst->free = hctr2_free_instance;
@@ -527,8 +424,11 @@ static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb)
polyval_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(polyval_name))
return PTR_ERR(polyval_name);
+ if (strcmp(polyval_name, "polyval") != 0 &&
+ strcmp(polyval_name, "polyval-lib") != 0)
+ return -ENOENT;
- return hctr2_create_common(tmpl, tb, xctr_name, polyval_name);
+ return hctr2_create_common(tmpl, tb, xctr_name);
}
static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -544,7 +444,7 @@ static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb)
blockcipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- return hctr2_create_common(tmpl, tb, xctr_name, "polyval");
+ return hctr2_create_common(tmpl, tb, xctr_name);
}
static struct crypto_template hctr2_tmpls[] = {
@@ -572,10 +472,10 @@ static void __exit hctr2_module_exit(void)
ARRAY_SIZE(hctr2_tmpls));
}
-subsys_initcall(hctr2_module_init);
+module_init(hctr2_module_init);
module_exit(hctr2_module_exit);
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("hctr2");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
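The precompute-and-resume flow that the removed shash export/import calls implemented is unchanged, just expressed directly in the polyval library API. A condensed sketch of that flow as hctr2_hash_tweak() now performs it (polyval_* names as used by this patch; not a complete driver):

static void tweak_hash_sketch(const struct polyval_key *key,
			      const struct polyval_elem *lenblk_state,
			      const u8 *tweak, unsigned int tweaklen,
			      struct polyval_elem *out)
{
	struct polyval_ctx ctx;

	/* resume from the state saved after hashing the length block */
	polyval_import_blkaligned(&ctx, key, lenblk_state);
	polyval_update(&ctx, tweak, tweaklen);	/* tweaklen % 16 == 0 */
	/* saved once, reused for both H(T || N) and H(T || V) */
	polyval_export_blkaligned(&ctx, out);
}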
diff --git a/crypto/hkdf.c b/crypto/hkdf.c
new file mode 100644
index 000000000000..82d1b32ca6ce
--- /dev/null
+++ b/crypto/hkdf.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
+ * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
+ * "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/hkdf.h>
+#include <linux/module.h>
+
+/*
+ * HKDF consists of two steps:
+ *
+ * 1. HKDF-Extract: extract a pseudorandom key from the input keying material
+ * and optional salt.
+ * 2. HKDF-Expand: expand the pseudorandom key into output keying material of
+ * any length, parameterized by an application-specific info string.
+ *
+ */
+/**
+ * hkdf_extract - HKDF-Extract (RFC 5869 section 2.2)
+ * @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The
+ * caller is responsible for setting the @prk afterwards.
+ * @ikm: input keying material
+ * @ikmlen: length of @ikm
+ * @salt: input salt value
+ * @saltlen: length of @salt
+ * @prk: resulting pseudorandom key
+ *
+ * Extracts a pseudorandom key @prk from the input keying material
+ * @ikm with length @ikmlen and salt @salt with length @saltlen.
+ * The length of @prk is given by the digest size of @hmac_tfm.
+ * For an 'unsalted' version of HKDF-Extract @salt must be set
+ * to all zeroes and @saltlen must be set to the length of @prk.
+ *
+ * Returns 0 on success with the pseudorandom key stored in @prk,
+ * or a negative errno value otherwise.
+ */
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk)
+{
+ int err;
+
+ err = crypto_shash_setkey(hmac_tfm, salt, saltlen);
+ if (!err)
+ err = crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_extract);
+
+/**
+ * hkdf_expand - HKDF-Expand (RFC 5869 section 2.3)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @info: application-specific information
+ * @infolen: length of @info
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * This expands the pseudorandom key, which was already keyed into @hmac_tfm,
+ * into @okmlen bytes of output keying material parameterized by the
+ * application-specific @info of length @infolen bytes.
+ * This is thread-safe and may be called by multiple threads in parallel.
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
+{
+ SHASH_DESC_ON_STACK(desc, hmac_tfm);
+ unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm);
+ int err;
+ const u8 *prev = NULL;
+ u8 counter = 1;
+ u8 tmp[HASH_MAX_DIGESTSIZE] = {};
+
+ if (WARN_ON(okmlen > 255 * hashlen))
+ return -EINVAL;
+
+ desc->tfm = hmac_tfm;
+
+ for (i = 0; i < okmlen; i += hashlen) {
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+
+ if (prev) {
+ err = crypto_shash_update(desc, prev, hashlen);
+ if (err)
+ goto out;
+ }
+
+ if (infolen) {
+ err = crypto_shash_update(desc, info, infolen);
+ if (err)
+ goto out;
+ }
+
+ BUILD_BUG_ON(sizeof(counter) != 1);
+ if (okmlen - i < hashlen) {
+ err = crypto_shash_finup(desc, &counter, 1, tmp);
+ if (err)
+ goto out;
+ memcpy(&okm[i], tmp, okmlen - i);
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
+ if (err)
+ goto out;
+ }
+ counter++;
+ prev = &okm[i];
+ }
+ err = 0;
+out:
+ if (unlikely(err))
+ memzero_explicit(okm, okmlen); /* so caller doesn't need to */
+ shash_desc_zero(desc);
+ memzero_explicit(tmp, HASH_MAX_DIGESTSIZE);
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_expand);
+
+struct hkdf_testvec {
+ const char *test;
+ const u8 *ikm;
+ const u8 *salt;
+ const u8 *info;
+ const u8 *prk;
+ const u8 *okm;
+ u16 ikm_size;
+ u16 salt_size;
+ u16 info_size;
+ u16 prk_size;
+ u16 okm_size;
+};
+
+/*
+ * HKDF test vectors from RFC5869
+ *
+ * Additional HKDF test vectors from
+ * https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md
+ */
+static const struct hkdf_testvec hkdf_sha256_tv[] = {
+ {
+ .test = "basic hdkf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63"
+ "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5",
+ .prk_size = 32,
+ .okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a"
+ "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf"
+ "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c"
+ "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44",
+ .prk_size = 32,
+ .okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34"
+ "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c"
+ "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09"
+ "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71"
+ "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f"
+ "\x1d\x87",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf"
+ "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04",
+ .prk_size = 32,
+ .okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31"
+ "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d"
+ "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c"
+ "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf",
+ .prk_size = 32,
+ .okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43"
+ "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d"
+ "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 32,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d"
+ "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf",
+ .prk_size = 32,
+ .okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53"
+ "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a"
+ "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha384_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30"
+ "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde"
+ "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde",
+ .prk_size = 48,
+ .okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38"
+ "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7"
+ "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3"
+ "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b"
+ "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb",
+ .prk_size = 48,
+ .okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81"
+ "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee"
+ "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49"
+ "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6"
+ "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a"
+ "\x8b\x2e",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d"
+ "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b"
+ "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5",
+ .prk_size = 48,
+ .okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf"
+ "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa"
+ "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0"
+ "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a"
+ "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72",
+ .prk_size = 48,
+ .okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75"
+ "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1"
+ "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 48,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e"
+ "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc"
+ "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5",
+ .prk_size = 48,
+ .okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78"
+ "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b"
+ "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha512_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b"
+ "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26"
+ "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f"
+ "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37",
+ .prk_size = 64,
+ .okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4"
+ "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93"
+ "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d"
+ "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e"
+ "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe"
+ "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a",
+ .prk_size = 64,
+ .okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67"
+ "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1"
+ "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a"
+ "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35"
+ "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e"
+ "\x7a\x93",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21"
+ "\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b"
+ "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde"
+ "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6",
+ .prk_size = 64,
+ .okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c"
+ "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f"
+ "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f"
+ "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f"
+ "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2"
+ "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d",
+ .prk_size = 64,
+ .okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff"
+ "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35"
+ "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 64,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89"
+ "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa"
+ "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1"
+ "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c",
+ .prk_size = 64,
+ .okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90"
+ "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a"
+ "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb",
+ .okm_size = 42,
+ }
+};
+
+static int hkdf_test(const char *shash, const struct hkdf_testvec *tv)
+{
+ struct crypto_shash *tfm = NULL;
+ u8 *prk = NULL, *okm = NULL;
+ unsigned int prk_size;
+ const char *driver;
+ int err;
+
+ tfm = crypto_alloc_shash(shash, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("%s(%s): failed to allocate transform: %ld\n",
+ tv->test, shash, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ driver = crypto_shash_driver_name(tfm);
+
+ prk_size = crypto_shash_digestsize(tfm);
+ prk = kzalloc(prk_size, GFP_KERNEL);
+ if (!prk) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (tv->prk_size != prk_size) {
+ pr_err("%s(%s): prk size mismatch (vec %u, digest %u\n",
+ tv->test, driver, tv->prk_size, prk_size);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ err = hkdf_extract(tfm, tv->ikm, tv->ikm_size,
+ tv->salt, tv->salt_size, prk);
+ if (err) {
+ pr_err("%s(%s): hkdf_extract failed with %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ if (memcmp(prk, tv->prk, tv->prk_size)) {
+ pr_err("%s(%s): hkdf_extract prk mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE,
+ 16, 1, prk, tv->prk_size, false);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ okm = kzalloc(tv->okm_size, GFP_KERNEL);
+ if (!okm) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = crypto_shash_setkey(tfm, tv->prk, tv->prk_size);
+ if (err) {
+ pr_err("%s(%s): failed to set prk, error %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ err = hkdf_expand(tfm, tv->info, tv->info_size,
+ okm, tv->okm_size);
+ if (err) {
+ pr_err("%s(%s): hkdf_expand() failed with %d\n",
+ tv->test, driver, err);
+ } else if (memcmp(okm, tv->okm, tv->okm_size)) {
+ pr_err("%s(%s): hkdf_expand() okm mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE,
+ 16, 1, okm, tv->okm_size, false);
+ err = -EINVAL;
+ }
+out_free:
+ kfree(okm);
+ kfree(prk);
+ crypto_free_shash(tfm);
+ return err;
+}
+
+static int __init crypto_hkdf_module_init(void)
+{
+ int ret = 0, i;
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) {
+ ret = hkdf_test("hmac(sha256)", &hkdf_sha256_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha384_tv); i++) {
+ ret = hkdf_test("hmac(sha384)", &hkdf_sha384_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha512_tv); i++) {
+ ret = hkdf_test("hmac(sha512)", &hkdf_sha512_tv[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit crypto_hkdf_module_exit(void) {}
+
+late_initcall(crypto_hkdf_module_init);
+module_exit(crypto_hkdf_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)");
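Putting the two exported helpers together: a minimal usage sketch deriving 42 bytes with hmac(sha256), mirroring what hkdf_test() above does (error unwinding abbreviated):

static int hkdf_usage_sketch(const u8 *ikm, unsigned int ikmlen,
			     const u8 *salt, unsigned int saltlen,
			     const u8 *info, unsigned int infolen,
			     u8 okm[42])
{
	struct crypto_shash *tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	u8 prk[SHA256_DIGEST_SIZE];
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = hkdf_extract(tfm, ikm, ikmlen, salt, saltlen, prk) ?:
	      crypto_shash_setkey(tfm, prk, sizeof(prk)) ?:	/* key = PRK */
	      hkdf_expand(tfm, info, infolen, okm, 42);

	memzero_explicit(prk, sizeof(prk));
	crypto_free_shash(tfm);
	return err;
}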
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 3610ff0b6739..148af460ae97 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -13,30 +13,24 @@
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/fips.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
struct hmac_ctx {
struct crypto_shash *hash;
+ /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+ u8 pads[];
};
-static inline void *align_ptr(void *p, unsigned int align)
-{
- return (void *)ALIGN((unsigned long)p, align);
-}
-
-static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
-{
- return align_ptr(crypto_shash_ctx_aligned(tfm) +
- crypto_shash_statesize(tfm) * 2,
- crypto_tfm_ctx_alignment());
-}
+struct ahash_hmac_ctx {
+ struct crypto_ahash *hash;
+ /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+ u8 pads[];
+};
static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
@@ -44,13 +38,12 @@ static int hmac_setkey(struct crypto_shash *parent,
int bs = crypto_shash_blocksize(parent);
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
- char *ipad = crypto_shash_ctx_aligned(parent);
- char *opad = ipad + ss;
- struct hmac_ctx *ctx = align_ptr(opad + ss,
- crypto_tfm_ctx_alignment());
- struct crypto_shash *hash = ctx->hash;
+ struct hmac_ctx *tctx = crypto_shash_ctx(parent);
+ struct crypto_shash *hash = tctx->hash;
+ u8 *ipad = &tctx->pads[0];
+ u8 *opad = &tctx->pads[ss];
SHASH_DESC_ON_STACK(shash, hash);
- unsigned int i;
+ int err, i;
if (fips_enabled && (keylen < 112 / 8))
return -EINVAL;
@@ -76,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent,
opad[i] ^= HMAC_OPAD_VALUE;
}
- return crypto_shash_init(shash) ?:
- crypto_shash_update(shash, ipad, bs) ?:
- crypto_shash_export(shash, ipad) ?:
- crypto_shash_init(shash) ?:
- crypto_shash_update(shash, opad, bs) ?:
- crypto_shash_export(shash, opad);
+ err = crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, ipad, bs) ?:
+ crypto_shash_export(shash, ipad) ?:
+ crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, opad, bs) ?:
+ crypto_shash_export(shash, opad);
+ shash_desc_zero(shash);
+ return err;
}
static int hmac_export(struct shash_desc *pdesc, void *out)
@@ -94,37 +89,42 @@ static int hmac_export(struct shash_desc *pdesc, void *out)
static int hmac_import(struct shash_desc *pdesc, const void *in)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
- struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
- desc->tfm = ctx->hash;
+ desc->tfm = tctx->hash;
return crypto_shash_import(desc, in);
}
-static int hmac_init(struct shash_desc *pdesc)
+static int hmac_export_core(struct shash_desc *pdesc, void *out)
{
- return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ return crypto_shash_export_core(desc, out);
}
-static int hmac_update(struct shash_desc *pdesc,
- const u8 *data, unsigned int nbytes)
+static int hmac_import_core(struct shash_desc *pdesc, const void *in)
{
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
struct shash_desc *desc = shash_desc_ctx(pdesc);
- return crypto_shash_update(desc, data, nbytes);
+ desc->tfm = tctx->hash;
+ return crypto_shash_import_core(desc, in);
}
-static int hmac_final(struct shash_desc *pdesc, u8 *out)
+static int hmac_init(struct shash_desc *pdesc)
+{
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
+
+ return hmac_import(pdesc, &tctx->pads[0]);
+}
+
+static int hmac_update(struct shash_desc *pdesc,
+ const u8 *data, unsigned int nbytes)
{
- struct crypto_shash *parent = pdesc->tfm;
- int ds = crypto_shash_digestsize(parent);
- int ss = crypto_shash_statesize(parent);
- char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
- return crypto_shash_final(desc, out) ?:
- crypto_shash_import(desc, opad) ?:
- crypto_shash_finup(desc, out, ds, out);
+ return crypto_shash_update(desc, data, nbytes);
}
static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
@@ -134,7 +134,8 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
struct crypto_shash *parent = pdesc->tfm;
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
- char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
+ const u8 *opad = &tctx->pads[ss];
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_finup(desc, data, nbytes, out) ?:
@@ -147,45 +148,54 @@ static int hmac_init_tfm(struct crypto_shash *parent)
struct crypto_shash *hash;
struct shash_instance *inst = shash_alg_instance(parent);
struct crypto_shash_spawn *spawn = shash_instance_ctx(inst);
- struct hmac_ctx *ctx = hmac_ctx(parent);
+ struct hmac_ctx *tctx = crypto_shash_ctx(parent);
hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
- parent->descsize = sizeof(struct shash_desc) +
- crypto_shash_descsize(hash);
+ tctx->hash = hash;
+ return 0;
+}
+
+static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
+{
+ struct hmac_ctx *sctx = crypto_shash_ctx(src);
+ struct hmac_ctx *dctx = crypto_shash_ctx(dst);
+ struct crypto_shash *hash;
+
+ hash = crypto_clone_shash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
- ctx->hash = hash;
+ dctx->hash = hash;
return 0;
}
static void hmac_exit_tfm(struct crypto_shash *parent)
{
- struct hmac_ctx *ctx = hmac_ctx(parent);
- crypto_free_shash(ctx->hash);
+ struct hmac_ctx *tctx = crypto_shash_ctx(parent);
+
+ crypto_free_shash(tctx->hash);
}
-static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int __hmac_create_shash(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 mask)
{
struct shash_instance *inst;
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
- u32 mask;
int err;
int ds;
int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
@@ -204,29 +214,28 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
ss < alg->cra_blocksize)
goto err_free_inst;
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
+ err = crypto_inst_setname(shash_crypto_instance(inst), "hmac",
+ "hmac-shash", alg);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2);
- ss = ALIGN(ss, alg->cra_alignmask + 1);
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
-
- inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
- ALIGN(ss * 2, crypto_tfm_ctx_alignment());
-
+ inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize;
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
- inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
+ inst->alg.export_core = hmac_export_core;
+ inst->alg.import_core = hmac_import_core;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
+ inst->alg.clone_tfm = hmac_clone_tfm;
inst->alg.exit_tfm = hmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
@@ -239,23 +248,332 @@ err_free_inst:
return err;
}
-static struct crypto_template hmac_tmpl = {
- .name = "hmac",
- .create = hmac_create,
- .module = THIS_MODULE,
+static int hmac_setkey_ahash(struct crypto_ahash *parent,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash);
+ int ds = crypto_ahash_digestsize(parent);
+ int bs = crypto_ahash_blocksize(parent);
+ int ss = crypto_ahash_statesize(parent);
+ HASH_REQUEST_ON_STACK(req, fb);
+ u8 *opad = &tctx->pads[ss];
+ u8 *ipad = &tctx->pads[0];
+ int err, i;
+
+ if (fips_enabled && (keylen < 112 / 8))
+ return -EINVAL;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+
+ if (keylen > bs) {
+ ahash_request_set_virt(req, inkey, ipad, keylen);
+ err = crypto_ahash_digest(req);
+ if (err)
+ goto out_zero_req;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, inkey, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= HMAC_IPAD_VALUE;
+ opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ ahash_request_set_virt(req, ipad, NULL, bs);
+ err = crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, ipad);
+
+ ahash_request_set_virt(req, opad, NULL, bs);
+ err = err ?:
+ crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, opad);
+
+out_zero_req:
+ HASH_REQUEST_ZERO(req);
+ return err;
+}
+
+static int hmac_export_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import(req, in);
+}
+
+static int hmac_export_core_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export_core(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_core_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import_core(req, in);
+}
+
+static int hmac_init_ahash(struct ahash_request *preq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ return hmac_import_ahash(preq, &tctx->pads[0]);
+}
+
+static int hmac_update_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ preq->base.complete, preq->base.data);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes);
+ return crypto_ahash_update(req);
+}
+
+static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_request *req = ahash_request_ctx(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ int ds = crypto_ahash_digestsize(tfm);
+ int ss = crypto_ahash_statesize(tfm);
+ const u8 *opad = &tctx->pads[ss];
+
+ ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask,
+ preq->base.complete, preq->base.data);
+ ahash_request_set_virt(req, preq->result, preq->result, ds);
+ return crypto_ahash_import(req, opad) ?:
+ crypto_ahash_finup(req);
+}
+
+static void hmac_finup_done(void *data, int err)
+{
+ struct ahash_request *preq = data;
+
+ if (err)
+ goto out;
+
+ err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ ahash_request_complete(preq, err);
+}
+
+static int hmac_finup_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ hmac_finup_done, preq);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, preq->result,
+ preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, preq->result,
+ preq->nbytes);
+ return crypto_ahash_finup(req) ?:
+ hmac_finup_finish(preq, 0);
+}
+
+static int hmac_digest_ahash(struct ahash_request *preq)
+{
+ return hmac_init_ahash(preq) ?:
+ hmac_finup_ahash(preq);
+}
+
+static int hmac_init_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_instance *inst = ahash_alg_instance(parent);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *hash;
+
+ hash = crypto_spawn_ahash(ahash_instance_ctx(inst));
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(hash)) {
+ crypto_free_ahash(hash);
+ return -EINVAL;
+ }
+
+ tctx->hash = hash;
+ return 0;
+}
+
+static int hmac_clone_ahash_tfm(struct crypto_ahash *dst,
+ struct crypto_ahash *src)
+{
+ struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src);
+ struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst);
+ struct crypto_ahash *hash;
+
+ hash = crypto_clone_ahash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ dctx->hash = hash;
+ return 0;
+}
+
+static void hmac_exit_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+
+ crypto_free_ahash(tctx->hash);
+}
+
+static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb,
+ u32 mask)
+{
+ struct crypto_ahash_spawn *spawn;
+ struct ahash_instance *inst;
+ struct crypto_alg *alg;
+ struct hash_alg_common *halg;
+ int ds, ss, err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = ahash_instance_ctx(inst);
+
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+ halg = crypto_spawn_ahash_alg(spawn);
+ alg = &halg->base;
+
+ /* The underlying hash algorithm must not require a key */
+ err = -EINVAL;
+ if (crypto_hash_alg_needs_key(halg))
+ goto err_free_inst;
+
+ ds = halg->digestsize;
+ ss = halg->statesize;
+ if (ds > alg->cra_blocksize || ss < alg->cra_blocksize)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.halg.base.cra_flags = alg->cra_flags &
+ CRYPTO_ALG_INHERITED_FLAGS;
+ inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT;
+ inst->alg.halg.base.cra_priority = alg->cra_priority + 100;
+ inst->alg.halg.base.cra_blocksize = alg->cra_blocksize;
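+ /* The tfm context stores the ipad and opad hash states back to back. */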
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) +
+ (ss * 2);
+ inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) +
+ alg->cra_reqsize;
+
+ inst->alg.halg.digestsize = ds;
+ inst->alg.halg.statesize = ss;
+ inst->alg.init = hmac_init_ahash;
+ inst->alg.update = hmac_update_ahash;
+ inst->alg.finup = hmac_finup_ahash;
+ inst->alg.digest = hmac_digest_ahash;
+ inst->alg.export = hmac_export_ahash;
+ inst->alg.import = hmac_import_ahash;
+ inst->alg.export_core = hmac_export_core_ahash;
+ inst->alg.import_core = hmac_import_core_ahash;
+ inst->alg.setkey = hmac_setkey_ahash;
+ inst->alg.init_tfm = hmac_init_ahash_tfm;
+ inst->alg.clone_tfm = hmac_clone_ahash_tfm;
+ inst->alg.exit_tfm = hmac_exit_ahash_tfm;
+
+ inst->free = ahash_free_singlespawn_instance;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ ahash_free_singlespawn_instance(inst);
+ }
+ return err;
+}
+
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ u32 mask;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ mask = crypto_algt_inherited_mask(algt);
+
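+ /*
+ * A request for CRYPTO_ALG_TYPE_AHASH (or one left unconstrained by
+ * the type mask) selects the ahash flavour; anything else must be
+ * asking for the shash flavour.
+ */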
+ if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK))
+ return hmac_create_ahash(tmpl, tb, mask);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK)
+ return -EINVAL;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
+ if (err)
+ return err == -EINVAL ? -ENOENT : err;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static struct crypto_template hmac_tmpls[] = {
+ {
+ .name = "hmac",
+ .create = hmac_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "hmac-shash",
+ .create = hmac_create_shash,
+ .module = THIS_MODULE,
+ },
};
static int __init hmac_module_init(void)
{
- return crypto_register_template(&hmac_tmpl);
+ return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
static void __exit hmac_module_exit(void)
{
- crypto_unregister_template(&hmac_tmpl);
+ crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
-subsys_initcall(hmac_module_init);
+module_init(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
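For context, a minimal sketch of how a caller could drive the resulting "hmac(sha256)" instance, assuming this template backs the hmac() name; the key/msg buffers are illustrative:

	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 out[32]; /* SHA-256 digest size */
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (!err) {
		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
		} else {
			ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						   crypto_req_done, &wait);
			/* msg must be addressable via scatterlist, i.e. not on the stack */
			sg_init_one(&sg, msg, msglen);
			ahash_request_set_crypt(req, &sg, out, msglen);
			err = crypto_wait_req(crypto_ahash_digest(req), &wait);
			ahash_request_free(req);
		}
	}
	crypto_free_ahash(tfm);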
diff --git a/crypto/internal.h b/crypto/internal.h
index c08385571853..b9afd68767c1 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <linux/completion.h>
+#include <linux/err.h>
#include <linux/jump_label.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <linux/numa.h>
#include <linux/refcount.h>
#include <linux/rwsem.h>
+#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -31,6 +33,22 @@ struct crypto_larval {
bool test_started;
};
+struct crypto_type {
+ unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+ unsigned int (*extsize)(struct crypto_alg *alg);
+ int (*init_tfm)(struct crypto_tfm *tfm);
+ void (*show)(struct seq_file *m, struct crypto_alg *alg);
+ int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
+ void (*free)(struct crypto_instance *inst);
+ void (*destroy)(struct crypto_alg *alg);
+
+ unsigned int type;
+ unsigned int maskclear;
+ unsigned int maskset;
+ unsigned int tfmsize;
+ unsigned int algsize;
+};
+
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -47,7 +65,29 @@ extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
-DECLARE_STATIC_KEY_FALSE(crypto_boot_test_finished);
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
+#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
+static inline bool crypto_boot_test_finished(void)
+{
+ return true;
+}
+static inline void set_crypto_boot_test_finished(void)
+{
+}
+#else
+DECLARE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
+static inline bool crypto_boot_test_finished(void)
+{
+ return static_branch_likely(&__crypto_boot_test_finished);
+}
+static inline void set_crypto_boot_test_finished(void)
+{
+ static_branch_enable(&__crypto_boot_test_finished);
+}
+#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) ||
+ * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
+ */
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
@@ -73,18 +113,21 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
-void crypto_larval_kill(struct crypto_alg *alg);
-void crypto_wait_for_test(struct crypto_larval *larval);
+void crypto_schedule_test(struct crypto_larval *larval);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
+struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
+ u32 mask, gfp_t gfp);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+ struct crypto_tfm *otfm);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -119,10 +162,12 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
return alg;
}
+void crypto_destroy_alg(struct crypto_alg *alg);
+
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
- alg->cra_destroy(alg);
+ if (refcount_dec_and_test(&alg->cra_refcnt))
+ crypto_destroy_alg(alg);
}
static inline int crypto_tmpl_get(struct crypto_template *tmpl)
@@ -166,5 +211,10 @@ static inline int crypto_is_test_larval(struct crypto_larval *larval)
return larval->alg.cra_driver_name[0];
}
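+
+/* Take a reference on a tfm unless its refcount has already hit zero. */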
+static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm)
+{
+ return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW);
+}
+
#endif /* _CRYPTO_INTERNAL_H */
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 2d115bec15ae..7c880cf34c52 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -2,7 +2,7 @@
* Non-physical true random number generator based on timing jitter --
* Linux Kernel Crypto API specific code
*
- * Copyright Stephan Mueller <smueller@chronox.de>, 2015
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,6 +37,9 @@
* DAMAGE.
*/
+#include <crypto/hash.h>
+#include <crypto/sha3.h>
+#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -45,28 +48,30 @@
#include "jitterentropy.h"
+#define JENT_CONDITIONING_HASH "sha3-256"
+
/***************************************************************************
* Helper function
***************************************************************************/
-void *jent_zalloc(unsigned int len)
+void *jent_kvzalloc(unsigned int len)
{
- return kzalloc(len, GFP_KERNEL);
+ return kvzalloc(len, GFP_KERNEL);
}
-void jent_zfree(void *ptr)
+void jent_kvzfree(void *ptr, unsigned int len)
{
- kfree_sensitive(ptr);
+ kvfree_sensitive(ptr, len);
}
-void jent_panic(char *s)
+void *jent_zalloc(unsigned int len)
{
- panic("%s", s);
+ return kzalloc(len, GFP_KERNEL);
}
-void jent_memcpy(void *dest, const void *src, unsigned int n)
+void jent_zfree(void *ptr)
{
- memcpy(dest, src, n);
+ kfree_sensitive(ptr);
}
/*
@@ -93,6 +98,94 @@ void jent_get_nstime(__u64 *out)
tmp = ktime_get_ns();
*out = tmp;
+ jent_raw_hires_entropy_store(tmp);
+}
+
+int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+ unsigned int addtl_len, __u64 hash_loop_cnt,
+ unsigned int stuck)
+{
+ struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
+ SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm);
+ u8 intermediary[SHA3_256_DIGEST_SIZE];
+ __u64 j = 0;
+ int ret;
+
+ desc->tfm = hash_state_desc->tfm;
+
+ if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) {
+ pr_warn_ratelimited("Unexpected digest size\n");
+ return -EINVAL;
+ }
+ kmsan_unpoison_memory(intermediary, sizeof(intermediary));
+
+ /*
+ * This loop fills a buffer which is injected into the entropy pool.
+ * The main reason for this loop is to execute something over which we
+ * can perform a timing measurement. The injection of the resulting
+ * data into the pool is performed to ensure the result is used and
+ * the compiler cannot optimize the loop away in case the result is not
+ * used at all. Yet that data is considered "additional input" in the
+ * terminology of SP800-90A and carries no entropy.
+ *
+ * Note, it does not matter which or how much data you inject, we are
+ * interested in one Keccak1600 compression operation performed with
+ * the crypto_shash_finup.
+ */
+ for (j = 0; j < hash_loop_cnt; j++) {
+ ret = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, intermediary,
+ sizeof(intermediary)) ?:
+ crypto_shash_finup(desc, addtl, addtl_len, intermediary);
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * Inject the data from the previous loop into the pool. This data is
+ * not considered to contain any entropy, but it stirs the pool a bit.
+ */
+ ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary));
+ if (ret)
+ goto err;
+
+ /*
+ * Insert the time stamp into the hash context representing the pool.
+ *
+ * If the time stamp is stuck, do not finally insert the value into the
+ * entropy pool. Although this operation should not do any harm even
+ * when the time stamp has no entropy, SP800-90B section 3.1.5 requires
+ * that any conditioning operation receive an identical amount of input
+ * data.
+ */
+ if (stuck)
+ time = 0;
+
+ ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64));
+
+err:
+ shash_desc_zero(desc);
+ memzero_explicit(intermediary, sizeof(intermediary));
+
+ return ret;
+}
+
+int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len)
+{
+ struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
+ u8 jent_block[SHA3_256_DIGEST_SIZE];
+ /* Obtain data from entropy pool and re-initialize it */
+ int ret = crypto_shash_final(hash_state_desc, jent_block) ?:
+ crypto_shash_init(hash_state_desc) ?:
+ crypto_shash_update(hash_state_desc, jent_block,
+ sizeof(jent_block));
+
+ if (!ret && dst_len)
+ memcpy(dst, jent_block, dst_len);
+
+ memzero_explicit(jent_block, sizeof(jent_block));
+ return ret;
}
/***************************************************************************
@@ -102,33 +195,76 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
- unsigned int reset_cnt;
+ struct crypto_shash *tfm;
+ struct shash_desc *sdesc;
};
-static int jent_kcapi_init(struct crypto_tfm *tfm)
+static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
- int ret = 0;
- rng->entropy_collector = jent_entropy_collector_alloc(1, 0);
- if (!rng->entropy_collector)
- ret = -ENOMEM;
+ spin_lock(&rng->jent_lock);
- spin_lock_init(&rng->jent_lock);
- return ret;
-}
+ if (rng->sdesc) {
+ shash_desc_zero(rng->sdesc);
+ kfree(rng->sdesc);
+ }
+ rng->sdesc = NULL;
-static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
-{
- struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+ if (rng->tfm)
+ crypto_free_shash(rng->tfm);
+ rng->tfm = NULL;
- spin_lock(&rng->jent_lock);
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
rng->entropy_collector = NULL;
spin_unlock(&rng->jent_lock);
}
+static int jent_kcapi_init(struct crypto_tfm *tfm)
+{
+ struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+ struct crypto_shash *hash;
+ struct shash_desc *sdesc;
+ int size, ret = 0;
+
+ spin_lock_init(&rng->jent_lock);
+
+ /* Use SHA3-256 as conditioner */
+ hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
+ if (IS_ERR(hash)) {
+ pr_err("Cannot allocate conditioning digest\n");
+ return PTR_ERR(hash);
+ }
+ rng->tfm = hash;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sdesc->tfm = hash;
+ crypto_shash_init(sdesc);
+ rng->sdesc = sdesc;
+
+ rng->entropy_collector =
+ jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0,
+ sdesc);
+ if (!rng->entropy_collector) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ jent_kcapi_cleanup(tfm);
+ return ret;
+}
+
static int jent_kcapi_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
@@ -138,32 +274,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
spin_lock(&rng->jent_lock);
- /* Return a permanent error in case we had too many resets in a row. */
- if (rng->reset_cnt > (1<<10)) {
- ret = -EFAULT;
- goto out;
- }
-
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
- /* Reset RNG in case of health failures */
- if (ret < -1) {
- pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
- (ret == -2) ? "Repetition Count Test" :
- "Adaptive Proportion Test");
-
- rng->reset_cnt++;
-
+ if (ret == -3) {
+ /* Handle permanent health test error */
+ /*
+ * If the kernel was booted with fips=1, it implies that
+ * the entire kernel acts as a FIPS 140 module. In this case
+ * an SP800-90B permanent health test error is treated as
+ * a FIPS module error.
+ */
+ if (fips_enabled)
+ panic("Jitter RNG permanent health test failure\n");
+
+ pr_err("Jitter RNG permanent health test failure\n");
+ ret = -EFAULT;
+ } else if (ret == -2) {
+ /* Handle intermittent health test error */
+ pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
ret = -EAGAIN;
- } else {
- rng->reset_cnt = 0;
-
- /* Convert the Jitter RNG error into a usable error code */
- if (ret == -1)
- ret = -EINVAL;
+ } else if (ret == -1) {
+ /* Handle other errors */
+ ret = -EINVAL;
}
-out:
spin_unlock(&rng->jent_lock);
return ret;
@@ -187,16 +321,34 @@ static struct rng_alg jent_alg = {
.cra_module = THIS_MODULE,
.cra_init = jent_kcapi_init,
.cra_exit = jent_kcapi_cleanup,
-
}
};
static int __init jent_mod_init(void)
{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ struct crypto_shash *tfm;
int ret = 0;
- ret = jent_entropy_init();
+ jent_testing_init();
+
+ tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
+ if (IS_ERR(tfm)) {
+ jent_testing_exit();
+ return PTR_ERR(tfm);
+ }
+
+ desc->tfm = tfm;
+ crypto_shash_init(desc);
+ ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL);
+ shash_desc_zero(desc);
+ crypto_free_shash(tfm);
if (ret) {
+ /* Handle permanent health test error */
+ if (fips_enabled)
+ panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
+
+ jent_testing_exit();
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}
@@ -205,6 +357,7 @@ static int __init jent_mod_init(void)
static void __exit jent_mod_exit(void)
{
+ jent_testing_exit();
crypto_unregister_rng(&jent_alg);
}
diff --git a/crypto/jitterentropy-testing.c b/crypto/jitterentropy-testing.c
new file mode 100644
index 000000000000..21c9d7c3269a
--- /dev/null
+++ b/crypto/jitterentropy-testing.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Test interface for Jitter RNG.
+ *
+ * Copyright (C) 2023, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "jitterentropy.h"
+
+#define JENT_TEST_RINGBUFFER_SIZE (1<<10)
+#define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1)
+
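+/*
+ * Ring buffer of raw samples. rb_writer is atomic so
+ * jent_testing_have_data() can poll it locklessly; all actual buffer
+ * updates happen under ->lock.
+ */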
+struct jent_testing {
+ u64 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];
+ u32 rb_reader;
+ atomic_t rb_writer;
+ atomic_t jent_testing_enabled;
+ spinlock_t lock;
+ wait_queue_head_t read_wait;
+};
+
+static struct dentry *jent_raw_debugfs_root;
+
+/*************************** Generic Data Handling ****************************/
+
+/*
+ * boot variable:
+ * 0 ==> No boot test, gathering of runtime data allowed
+ * 1 ==> Boot test enabled and ready for collecting data, gathering runtime
+ * data is disabled
+ * 2 ==> Boot test completed and disabled, gathering of runtime data is
+ * disabled
+ */
+
+static void jent_testing_reset(struct jent_testing *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ data->rb_reader = 0;
+ atomic_set(&data->rb_writer, 0);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void jent_testing_data_init(struct jent_testing *data, u32 boot)
+{
+ /*
+ * Boot time testing implies a test is already running. To clear it,
+ * the caller has to unset the boot_test flag at runtime via sysfs to
+ * enable regular runtime testing.
+ */
+ if (boot)
+ return;
+
+ jent_testing_reset(data);
+ atomic_set(&data->jent_testing_enabled, 1);
+ pr_warn("Enabling data collection\n");
+}
+
+static void jent_testing_fini(struct jent_testing *data, u32 boot)
+{
+ /* If we have boot data, we do not reset yet to allow data to be read */
+ if (boot)
+ return;
+
+ atomic_set(&data->jent_testing_enabled, 0);
+ jent_testing_reset(data);
+ pr_warn("Disabling data collection\n");
+}
+
+static bool jent_testing_store(struct jent_testing *data, u64 value,
+ u32 *boot)
+{
+ unsigned long flags;
+
+ if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1))
+ return false;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /*
+ * Disable entropy testing for boot time testing after ring buffer
+ * is filled.
+ */
+ if (*boot) {
+ if (((u32)atomic_read(&data->rb_writer)) >
+ JENT_TEST_RINGBUFFER_SIZE) {
+ *boot = 2;
+ pr_warn_once("One time data collection test disabled\n");
+ spin_unlock_irqrestore(&data->lock, flags);
+ return false;
+ }
+
+ if (atomic_read(&data->rb_writer) == 1)
+ pr_warn("One time data collection test enabled\n");
+ }
+
+ data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) &
+ JENT_TEST_RINGBUFFER_MASK] = value;
+ atomic_inc(&data->rb_writer);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (wq_has_sleeper(&data->read_wait))
+ wake_up_interruptible(&data->read_wait);
+
+ return true;
+}
+
+static bool jent_testing_have_data(struct jent_testing *data)
+{
+ return ((((u32)atomic_read(&data->rb_writer)) &
+ JENT_TEST_RINGBUFFER_MASK) !=
+ (data->rb_reader & JENT_TEST_RINGBUFFER_MASK));
+}
+
+static int jent_testing_reader(struct jent_testing *data, u32 *boot,
+ u8 *outbuf, u32 outbuflen)
+{
+ unsigned long flags;
+ int collected_data = 0;
+
+ jent_testing_data_init(data, *boot);
+
+ while (outbuflen) {
+ u32 writer = (u32)atomic_read(&data->rb_writer);
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* We have no data or reached the writer. */
+ if (!writer || (writer == data->rb_reader)) {
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /*
+ * Now we gathered all boot data, enable regular data
+ * collection.
+ */
+ if (*boot) {
+ *boot = 0;
+ goto out;
+ }
+
+ wait_event_interruptible(data->read_wait,
+ jent_testing_have_data(data));
+ if (signal_pending(current)) {
+ collected_data = -ERESTARTSYS;
+ goto out;
+ }
+
+ continue;
+ }
+
+ /* We copy out word-wise */
+ if (outbuflen < sizeof(u64)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ goto out;
+ }
+
+ memcpy(outbuf, &data->jent_testing_rb[data->rb_reader],
+ sizeof(u64));
+ data->rb_reader++;
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ outbuf += sizeof(u64);
+ outbuflen -= sizeof(u64);
+ collected_data += sizeof(u64);
+ }
+
+out:
+ jent_testing_fini(data, *boot);
+ return collected_data;
+}
+
+static int jent_testing_extract_user(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos,
+ int (*reader)(u8 *outbuf, u32 outbuflen))
+{
+ u8 *tmp, *tmp_aligned;
+ int ret = 0, large_request = (nbytes > 256);
+
+ if (!nbytes)
+ return 0;
+
+ /*
+ * The intention of this interface is for collecting at least
+ * 1000 samples due to the SP800-90B requirements. However, due to
+ * memory and performance constraints, it is not desirable to allocate
+ * 8000 bytes of memory. Instead, we allocate space for only 125
+ * samples, which will allow the user to collect all 1000 samples using
+ * 8 calls to this interface.
+ */
+ tmp = kmalloc(125 * sizeof(u64) + sizeof(u64), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp_aligned = PTR_ALIGN(tmp, sizeof(u64));
+
+ while (nbytes) {
+ int i;
+
+ if (large_request && need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ i = min_t(int, nbytes, 125 * sizeof(u64));
+ i = reader(tmp_aligned, i);
+ if (i <= 0) {
+ if (i < 0)
+ ret = i;
+ break;
+ }
+ if (copy_to_user(buf, tmp_aligned, i)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ kfree_sensitive(tmp);
+
+ if (ret > 0)
+ *ppos += ret;
+
+ return ret;
+}
+
+/************** Raw High-Resolution Timer Entropy Data Handling **************/
+
+static u32 boot_raw_hires_test;
+module_param(boot_raw_hires_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_hires_test,
+ "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events");
+
+static struct jent_testing jent_raw_hires = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait)
+};
+
+int jent_raw_hires_entropy_store(__u64 value)
+{
+ return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test);
+}
+EXPORT_SYMBOL(jent_raw_hires_entropy_store);
+
+static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t jent_raw_hires_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return jent_testing_extract_user(file, to, count, ppos,
+ jent_raw_hires_entropy_reader);
+}
+
+static const struct file_operations jent_raw_hires_fops = {
+ .owner = THIS_MODULE,
+ .read = jent_raw_hires_read,
+};
+
+/******************************* Initialization *******************************/
+
+void jent_testing_init(void)
+{
+ jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ debugfs_create_file_unsafe("jent_raw_hires", 0400,
+ jent_raw_debugfs_root, NULL,
+ &jent_raw_hires_fops);
+}
+EXPORT_SYMBOL(jent_testing_init);
+
+void jent_testing_exit(void)
+{
+ debugfs_remove_recursive(jent_raw_debugfs_root);
+}
+EXPORT_SYMBOL(jent_testing_exit);
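As a rough illustration, the new debugfs node could be drained from userspace with a small reader; the /sys/kernel/debug mount point and the jitterentropy_rng directory name (derived from KBUILD_MODNAME) are assumptions:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path assumes debugfs at /sys/kernel/debug and a module
		 * named jitterentropy_rng; adjust to the actual build. */
		int fd = open("/sys/kernel/debug/jitterentropy_rng/jent_raw_hires",
			      O_RDONLY);
		uint64_t sample[125]; /* matches the kernel-side per-read buffer */
		ssize_t n;

		if (fd < 0)
			return 1;
		/* Eight reads of 125 samples yield the 1000 SP800-90B asks for. */
		for (int i = 0; i < 8 && (n = read(fd, sample, sizeof(sample))) > 0; i++)
			fwrite(sample, 1, n, stdout);
		close(fd);
		return 0;
	}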
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 93bff3213823..3f93cdc9a7af 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -2,7 +2,7 @@
* Non-physical true random number generator based on timing jitter --
* Jitter RNG standalone code.
*
- * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023
*
* Design
* ======
@@ -47,7 +47,7 @@
/*
* This Jitterentropy RNG is based on the jitterentropy library
- * version 2.2.0 provided at https://www.chronox.de/jent.html
+ * version 3.4.0 provided at https://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
@@ -57,25 +57,28 @@
typedef unsigned long long __u64;
typedef long long __s64;
typedef unsigned int __u32;
+typedef unsigned char u8;
#define NULL ((void *) 0)
/* The entropy pool */
struct rand_data {
+ /* SHA3-256 is used as conditioner */
+#define DATA_SIZE_BITS 256
/* all data values that are vital to maintain the security
* of the RNG are marked as SENSITIVE. A user must not
* access that information while the RNG executes its loops to
* calculate the next random value. */
- __u64 data; /* SENSITIVE Actual random number */
- __u64 old_data; /* SENSITIVE Previous random number */
- __u64 prev_time; /* SENSITIVE Previous time stamp */
-#define DATA_SIZE_BITS ((sizeof(__u64)) * 8)
- __u64 last_delta; /* SENSITIVE stuck test */
- __s64 last_delta2; /* SENSITIVE stuck test */
- unsigned int osr; /* Oversample rate */
-#define JENT_MEMORY_BLOCKS 64
-#define JENT_MEMORY_BLOCKSIZE 32
+ void *hash_state; /* SENSITIVE hash state entropy pool */
+ __u64 prev_time; /* SENSITIVE Previous time stamp */
+ __u64 last_delta; /* SENSITIVE stuck test */
+ __s64 last_delta2; /* SENSITIVE stuck test */
+
+ unsigned int flags; /* Flags used to initialize */
+ unsigned int osr; /* Oversample rate */
#define JENT_MEMORY_ACCESSLOOPS 128
-#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE)
+#define JENT_MEMORY_SIZE \
+ (CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \
+ CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE)
unsigned char *mem; /* Memory access location with size of
* memblocks * memblocksize */
unsigned int memlocation; /* Pointer to byte in *mem */
@@ -85,10 +88,11 @@ struct rand_data {
* bit generation */
/* Repetition Count Test */
- int rct_count; /* Number of stuck values */
+ unsigned int rct_count; /* Number of stuck values */
- /* Adaptive Proportion Test for a significance level of 2^-30 */
-#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
+ /* Adaptive Proportion Test cutoff values */
+ unsigned int apt_cutoff; /* Intermittent health test failure */
+ unsigned int apt_cutoff_permanent; /* Permanent health test failure */
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
@@ -96,9 +100,9 @@ struct rand_data {
unsigned int apt_observations; /* Number of collected observations */
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
- unsigned int apt_base_set:1; /* APT base reference set? */
+ unsigned int health_failure; /* Record health failure */
- unsigned int health_failure:1; /* Permanent health failure */
+ unsigned int apt_base_set:1; /* APT base reference set? */
};
/* Flags that can be used to initialize the RNG */
@@ -115,7 +119,16 @@ struct rand_data {
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
#define JENT_EHEALTH 9 /* Health test failed during initialization */
-#define JENT_ERCT 10 /* RCT failed during initialization */
+#define JENT_ERCT 10 /* RCT failed during initialization */
+#define JENT_EHASH 11 /* Hash self test failed */
+#define JENT_EMEM 12 /* Can't allocate memory for initialization */
+
+#define JENT_RCT_FAILURE 1 /* Failure in RCT health test. */
+#define JENT_APT_FAILURE 2 /* Failure in APT health test. */
+#define JENT_PERMANENT_FAILURE_SHIFT 16
+#define JENT_PERMANENT_FAILURE(x) ((x) << JENT_PERMANENT_FAILURE_SHIFT)
+#define JENT_RCT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE)
+#define JENT_APT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_APT_FAILURE)
/*
* The output n bits can receive more than n bits of min entropy, of course,
@@ -132,7 +145,9 @@ struct rand_data {
*/
#define JENT_ENTROPY_SAFETY_FACTOR 64
+#include <linux/array_size.h>
#include <linux/fips.h>
+#include <linux/minmax.h>
#include "jitterentropy.h"
/***************************************************************************
@@ -142,6 +157,47 @@ struct rand_data {
***************************************************************************/
/*
+ * See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B
+ * APT.
+ * https://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf
+ * In the syntax of R, this is C = 2 + qbinom(1 - 2^(-30), 511, 2^(-1/osr)).
+ * (The original formula wasn't correct because the first symbol must
+ * necessarily have been observed, so there is no chance of observing 0 of
+ * these symbols.)
+ *
+ * For alpha < 2^-53, R cannot be used as its float data type lacks
+ * arbitrary precision. A SageMath script is used to calculate those cutoff
+ * values.
+ *
+ * For any value above 14, this yields the maximal allowable value of 512
+ * (by FIPS 140-2 IG 7.19 Resolution # 16, we cannot choose a cutoff value that
+ * renders the test unable to fail).
+ */
+static const unsigned int jent_apt_cutoff_lookup[15] = {
+ 325, 422, 459, 477, 488, 494, 499, 502,
+ 505, 507, 508, 509, 510, 511, 512 };
+static const unsigned int jent_apt_cutoff_permanent_lookup[15] = {
+ 355, 447, 479, 494, 502, 507, 510, 512,
+ 512, 512, 512, 512, 512, 512, 512 };
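+
+/*
+ * Example: osr == 1 selects index 0, i.e. an intermittent cutoff of 325
+ * and a permanent cutoff of 355 within the 512-observation APT window.
+ */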
+
+static void jent_apt_init(struct rand_data *ec, unsigned int osr)
+{
+ /*
+ * Establish the apt_cutoff based on the presumed entropy rate of
+ * 1/osr.
+ */
+ if (osr >= ARRAY_SIZE(jent_apt_cutoff_lookup)) {
+ ec->apt_cutoff = jent_apt_cutoff_lookup[
+ ARRAY_SIZE(jent_apt_cutoff_lookup) - 1];
+ ec->apt_cutoff_permanent = jent_apt_cutoff_permanent_lookup[
+ ARRAY_SIZE(jent_apt_cutoff_permanent_lookup) - 1];
+ } else {
+ ec->apt_cutoff = jent_apt_cutoff_lookup[osr - 1];
+ ec->apt_cutoff_permanent =
+ jent_apt_cutoff_permanent_lookup[osr - 1];
+ }
+}
+
+/*
* Reset the APT counter
*
* @ec [in] Reference to entropy collector
@@ -172,8 +228,11 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
if (delta_masked == ec->apt_base) {
ec->apt_count++;
- if (ec->apt_count >= JENT_APT_CUTOFF)
- ec->health_failure = 1;
+ /* Note, ec->apt_count starts with one. */
+ if (ec->apt_count >= ec->apt_cutoff_permanent)
+ ec->health_failure |= JENT_APT_FAILURE_PERMANENT;
+ else if (ec->apt_count >= ec->apt_cutoff)
+ ec->health_failure |= JENT_APT_FAILURE;
}
ec->apt_observations++;
@@ -206,55 +265,38 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
- /*
- * If we have a count less than zero, a previous RCT round identified
- * a failure. We will not overwrite it.
- */
- if (ec->rct_count < 0)
- return;
-
if (stuck) {
ec->rct_count++;
/*
* The cutoff value is based on the following consideration:
- * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
- * In addition, we require an entropy value H of 1/OSR as this
+ * alpha = 2^-30 or 2^-60 as recommended in SP800-90B.
+ * In addition, we require an entropy value H of 1/osr as this
* is the minimum entropy required to provide full entropy.
- * Note, we collect 64 * OSR deltas for inserting them into
- * the entropy pool which should then have (close to) 64 bits
- * of entropy.
+ * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr
+ * deltas for inserting them into the entropy pool which should
+ * then have (close to) DATA_SIZE_BITS bits of entropy in the
+ * conditioned output.
*
* Note, ec->rct_count (which equals to value B in the pseudo
* code of SP800-90B section 4.4.1) starts with zero. Hence
* we need to subtract one from the cutoff value as calculated
- * following SP800-90B.
+ * following SP800-90B. Thus C = ceil(-log_2(alpha)/H) = 30*osr
+ * or 60*osr.
*/
- if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
+ if ((unsigned int)ec->rct_count >= (60 * ec->osr)) {
+ ec->rct_count = -1;
+ ec->health_failure |= JENT_RCT_FAILURE_PERMANENT;
+ } else if ((unsigned int)ec->rct_count >= (30 * ec->osr)) {
ec->rct_count = -1;
- ec->health_failure = 1;
+ ec->health_failure |= JENT_RCT_FAILURE;
}
} else {
+ /* Reset RCT */
ec->rct_count = 0;
}
}
-/*
- * Is there an RCT health test failure?
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- * 0 No health test failure
- * 1 Permanent health test failure
- */
-static int jent_rct_failure(struct rand_data *ec)
-{
- if (ec->rct_count < 0)
- return 1;
- return 0;
-}
-
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
@@ -308,12 +350,19 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
*
* @ec [in] Reference to entropy collector
*
- * @return
- * 0 No health test failure
- * 1 Permanent health test failure
+ * @return a bitmask indicating which tests failed
+ * 0 No health test failure
+ * 1 RCT failure
+ * 2 APT failure
+ * 1<<JENT_PERMANENT_FAILURE_SHIFT RCT permanent failure
+ * 2<<JENT_PERMANENT_FAILURE_SHIFT APT permanent failure
*/
-static int jent_health_failure(struct rand_data *ec)
+static unsigned int jent_health_failure(struct rand_data *ec)
{
+ /* Test is only enabled in FIPS mode */
+ if (!fips_enabled)
+ return 0;
+
return ec->health_failure;
}
@@ -326,15 +375,13 @@ static int jent_health_failure(struct rand_data *ec)
* an entropy collection.
*
* Input:
- * @ec entropy collector struct -- may be NULL
* @bits is the number of low bits of the timer to consider
* @min is the number of bits we shift the timer value to the right at
* the end to make sure we have a guaranteed minimum value
*
* @return Newly calculated loop counter
*/
-static __u64 jent_loop_shuffle(struct rand_data *ec,
- unsigned int bits, unsigned int min)
+static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min)
{
__u64 time = 0;
__u64 shuffle = 0;
@@ -342,12 +389,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
unsigned int mask = (1<<bits) - 1;
jent_get_nstime(&time);
- /*
- * Mix the current state of the random number into the shuffle
- * calculation to balance that shuffle a bit more.
- */
- if (ec)
- time ^= ec->data;
+
/*
* We fold the time value as much as possible to ensure that as many
* bits of the time stamp are included as possible.
@@ -369,81 +411,32 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
* execution time jitter
*
* This function injects the individual bits of the time value into the
- * entropy pool using an LFSR.
- *
- * The code is deliberately inefficient with respect to the bit shifting
- * and shall stay that way. This function is the root cause why the code
- * shall be compiled without optimization. This function not only acts as
- * folding operation, but this function's execution is used to measure
- * the CPU execution time jitter. Any change to the loop in this function
- * implies that careful retesting must be done.
+ * entropy pool using a hash.
*
- * @ec [in] entropy collector struct
- * @time [in] time stamp to be injected
- * @loop_cnt [in] if a value not equal to 0 is set, use the given value as
- * number of loops to perform the folding
- * @stuck [in] Is the time stamp identified as stuck?
+ * ec [in] entropy collector
+ * time [in] time stamp to be injected
+ * stuck [in] Is the time stamp identified as stuck?
*
* Output:
- * updated ec->data
- *
- * @return Number of loops the folding operation is performed
+ * updated hash context in the entropy collector or error code
*/
-static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt,
- int stuck)
+static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
{
- unsigned int i;
- __u64 j = 0;
- __u64 new = 0;
-#define MAX_FOLD_LOOP_BIT 4
-#define MIN_FOLD_LOOP_BIT 0
- __u64 fold_loop_cnt =
- jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT);
-
- /*
- * testing purposes -- allow test app to set the counter, not
- * needed during runtime
- */
- if (loop_cnt)
- fold_loop_cnt = loop_cnt;
- for (j = 0; j < fold_loop_cnt; j++) {
- new = ec->data;
- for (i = 1; (DATA_SIZE_BITS) >= i; i++) {
- __u64 tmp = time << (DATA_SIZE_BITS - i);
-
- tmp = tmp >> (DATA_SIZE_BITS - 1);
-
- /*
- * Fibonacci LSFR with polynomial of
- * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
- * primitive according to
- * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
- * (the shift values are the polynomial values minus one
- * due to counting bits from 0 to 63). As the current
- * position is always the LSB, the polynomial only needs
- * to shift data in from the left without wrap.
- */
- tmp ^= ((new >> 63) & 1);
- tmp ^= ((new >> 60) & 1);
- tmp ^= ((new >> 55) & 1);
- tmp ^= ((new >> 30) & 1);
- tmp ^= ((new >> 27) & 1);
- tmp ^= ((new >> 22) & 1);
- new <<= 1;
- new ^= tmp;
- }
- }
-
- /*
- * If the time stamp is stuck, do not finally insert the value into
- * the entropy pool. Although this operation should not do any harm
- * even when the time stamp has no entropy, SP800-90B requires that
- * any conditioning operation (SP800-90B considers the LFSR to be a
- * conditioning operation) to have an identical amount of input
- * data according to section 3.1.5.
- */
- if (!stuck)
- ec->data = new;
+#define SHA3_HASH_LOOP (1<<3)
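+ /*
+ * The health test counters ride along as additional input; they make
+ * no entropy claim but tie the conditioner input to the test state.
+ */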
+ struct {
+ int rct_count;
+ unsigned int apt_observations;
+ unsigned int apt_count;
+ unsigned int apt_base;
+ } addtl = {
+ ec->rct_count,
+ ec->apt_observations,
+ ec->apt_count,
+ ec->apt_base
+ };
+
+ return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
+ SHA3_HASH_LOOP, stuck);
}
/*
@@ -477,7 +470,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
#define MAX_ACC_LOOP_BIT 7
#define MIN_ACC_LOOP_BIT 0
__u64 acc_loop_cnt =
- jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
+ jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
if (NULL == ec || NULL == ec->mem)
return;
@@ -524,7 +517,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
*
* @return result of stuck test
*/
-static int jent_measure_jitter(struct rand_data *ec)
+static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta)
{
__u64 time = 0;
__u64 current_delta = 0;
@@ -545,14 +538,19 @@ static int jent_measure_jitter(struct rand_data *ec)
stuck = jent_stuck(ec, current_delta);
/* Now call the next noise sources which also injects the data */
- jent_lfsr_time(ec, current_delta, 0, stuck);
+ if (jent_condition_data(ec, current_delta, stuck))
+ stuck = 1;
+
+ /* return the raw entropy value */
+ if (ret_current_delta)
+ *ret_current_delta = current_delta;
return stuck;
}
/*
* Generator of one 64 bit random number
- * Function fills rand_data->data
+ * Function fills rand_data->hash_state
*
* @ec [in] Reference to entropy collector
*/
@@ -564,11 +562,11 @@ static void jent_gen_entropy(struct rand_data *ec)
safety_factor = JENT_ENTROPY_SAFETY_FACTOR;
/* priming of the ->prev_time value */
- jent_measure_jitter(ec);
+ jent_measure_jitter(ec, NULL);
while (!jent_health_failure(ec)) {
/* If a stuck measurement is received, repeat measurement */
- if (jent_measure_jitter(ec))
+ if (jent_measure_jitter(ec, NULL))
continue;
/*
@@ -599,9 +597,9 @@ static void jent_gen_entropy(struct rand_data *ec)
* @return 0 when request is fulfilled or an error
*
* The following error codes can occur:
- * -1 entropy_collector is NULL
- * -2 RCT failed
- * -3 APT test failed
+ * -1 entropy_collector is NULL or the generation failed
+ * -2 Intermittent health failure
+ * -3 Permanent health failure
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
@@ -612,50 +610,38 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
return -1;
while (len > 0) {
- unsigned int tocopy;
+ unsigned int tocopy, health_test_result;
jent_gen_entropy(ec);
- if (jent_health_failure(ec)) {
- int ret;
-
- if (jent_rct_failure(ec))
- ret = -2;
- else
- ret = -3;
-
+ health_test_result = jent_health_failure(ec);
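+ /* Permanent failure bits all sit above JENT_PERMANENT_FAILURE_SHIFT. */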
+ if (health_test_result > JENT_PERMANENT_FAILURE_SHIFT) {
/*
- * Re-initialize the noise source
- *
- * If the health test fails, the Jitter RNG remains
- * in failure state and will return a health failure
- * during next invocation.
+ * At this point, the Jitter RNG instance is considered
+ * as a failed instance. There is no rerun of the
+ * startup test any more, because the caller
+ * is assumed to not further use this instance.
*/
- if (jent_entropy_init())
- return ret;
-
- /* Set APT to initial state */
- jent_apt_reset(ec, 0);
- ec->apt_base_set = 0;
-
- /* Set RCT to initial state */
- ec->rct_count = 0;
-
- /* Re-enable Jitter RNG */
- ec->health_failure = 0;
-
+ return -3;
+ } else if (health_test_result) {
/*
- * Return the health test failure status to the
- * caller as the generated value is not appropriate.
+ * Perform startup health tests and return permanent
+ * error if it fails.
*/
- return ret;
+ if (jent_entropy_init(0, 0, NULL, ec)) {
+ /* Keep only the permanent failure bits */
+ ec->health_failure &=
+ JENT_RCT_FAILURE_PERMANENT |
+ JENT_APT_FAILURE_PERMANENT;
+ return -3;
+ }
+
+ return -2;
}
- if ((DATA_SIZE_BITS / 8) < len)
- tocopy = (DATA_SIZE_BITS / 8);
- else
- tocopy = len;
- jent_memcpy(p, &ec->data, tocopy);
+ tocopy = min(DATA_SIZE_BITS / 8, len);
+ if (jent_read_random_block(ec->hash_state, p, tocopy))
+ return -1;
len -= tocopy;
p += tocopy;
@@ -669,7 +655,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
***************************************************************************/
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
- unsigned int flags)
+ unsigned int flags,
+ void *hash_state)
{
struct rand_data *entropy_collector;
@@ -681,20 +668,28 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
/* Allocate memory for adding variations based on memory
* access
*/
- entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE);
+ entropy_collector->mem = jent_kvzalloc(JENT_MEMORY_SIZE);
if (!entropy_collector->mem) {
jent_zfree(entropy_collector);
return NULL;
}
- entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE;
- entropy_collector->memblocks = JENT_MEMORY_BLOCKS;
+ entropy_collector->memblocksize =
+ CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE;
+ entropy_collector->memblocks =
+ CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS;
entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
}
/* verify and set the oversampling rate */
if (osr == 0)
- osr = 1; /* minimum sampling rate is 1 */
+ osr = 1; /* H_submitter = 1 / osr */
entropy_collector->osr = osr;
+ entropy_collector->flags = flags;
+
+ entropy_collector->hash_state = hash_state;
+
+ /* Initialize the APT */
+ jent_apt_init(entropy_collector, osr);
/* fill the data pad with non-zero values */
jent_gen_entropy(entropy_collector);
@@ -704,24 +699,39 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
void jent_entropy_collector_free(struct rand_data *entropy_collector)
{
- jent_zfree(entropy_collector->mem);
+ jent_kvzfree(entropy_collector->mem, JENT_MEMORY_SIZE);
entropy_collector->mem = NULL;
jent_zfree(entropy_collector);
}
-int jent_entropy_init(void)
+int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state,
+ struct rand_data *p_ec)
{
- int i;
- __u64 delta_sum = 0;
- __u64 old_delta = 0;
- unsigned int nonstuck = 0;
- int time_backwards = 0;
- int count_mod = 0;
- int count_stuck = 0;
- struct rand_data ec = { 0 };
-
- /* Required for RCT */
- ec.osr = 1;
+ /*
+ * If the caller provides an allocated ec, reuse it, which implies that
+ * the health test entropy data is used to further stir the available
+ * entropy pool.
+ */
+ struct rand_data *ec = p_ec;
+ int i, time_backwards = 0, ret = 0, ec_free = 0;
+ unsigned int health_test_result;
+
+ if (!ec) {
+ ec = jent_entropy_collector_alloc(osr, flags, hash_state);
+ if (!ec)
+ return JENT_EMEM;
+ ec_free = 1;
+ } else {
+ /* Reset the APT */
+ jent_apt_reset(ec, 0);
+ /* Ensure that a new APT base is obtained */
+ ec->apt_base_set = 0;
+ /* Reset the RCT */
+ ec->rct_count = 0;
+ /* Reset intermittent, leave permanent health test result */
+ ec->health_failure &= (~JENT_RCT_FAILURE);
+ ec->health_failure &= (~JENT_APT_FAILURE);
+ }
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
@@ -750,31 +760,28 @@ int jent_entropy_init(void)
#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
- __u64 time = 0;
- __u64 time2 = 0;
- __u64 delta = 0;
- unsigned int lowdelta = 0;
- int stuck;
+ __u64 start_time = 0, end_time = 0, delta = 0;
/* Invoke core entropy collection logic */
- jent_get_nstime(&time);
- ec.prev_time = time;
- jent_lfsr_time(&ec, time, 0, 0);
- jent_get_nstime(&time2);
+ jent_measure_jitter(ec, &delta);
+ end_time = ec->prev_time;
+ start_time = ec->prev_time - delta;
/* test whether timer works */
- if (!time || !time2)
- return JENT_ENOTIME;
- delta = jent_delta(time, time2);
+ if (!start_time || !end_time) {
+ ret = JENT_ENOTIME;
+ goto out;
+ }
+
/*
* test whether timer is fine grained enough to provide
* delta even when called shortly after each other -- this
* implies that we also have a high resolution timer
*/
- if (!delta)
- return JENT_ECOARSETIME;
-
- stuck = jent_stuck(&ec, delta);
+ if (!delta || (end_time == start_time)) {
+ ret = JENT_ECOARSETIME;
+ goto out;
+ }
/*
* up to here we did not modify any variable that will be
@@ -786,51 +793,9 @@ int jent_entropy_init(void)
if (i < CLEARCACHE)
continue;
- if (stuck)
- count_stuck++;
- else {
- nonstuck++;
-
- /*
- * Ensure that the APT succeeded.
- *
- * With the check below that count_stuck must be less
- * than 10% of the overall generated raw entropy values
- * it is guaranteed that the APT is invoked at
- * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times.
- */
- if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
- jent_apt_reset(&ec,
- delta & JENT_APT_WORD_MASK);
- if (jent_health_failure(&ec))
- return JENT_EHEALTH;
- }
- }
-
- /* Validate RCT */
- if (jent_rct_failure(&ec))
- return JENT_ERCT;
-
/* test whether we have an increasing timer */
- if (!(time2 > time))
+ if (!(end_time > start_time))
time_backwards++;
-
- /* use 32 bit value to ensure compilation on 32 bit arches */
- lowdelta = time2 - time;
- if (!(lowdelta % 100))
- count_mod++;
-
- /*
- * ensure that we have a varying delta timer which is necessary
- * for the calculation of entropy -- perform this check
- * only after the first loop is executed as we need to prime
- * the old_data value
- */
- if (delta > old_delta)
- delta_sum += (delta - old_delta);
- else
- delta_sum += (old_delta - delta);
- old_delta = delta;
}
/*
@@ -840,31 +805,22 @@ int jent_entropy_init(void)
* should not fail. The value of 3 should cover the NTP case being
* performed during our test run.
*/
- if (time_backwards > 3)
- return JENT_ENOMONOTONIC;
-
- /*
- * Variations of deltas of time must on average be larger
- * than 1 to ensure the entropy estimation
- * implied with 1 is preserved
- */
- if ((delta_sum) <= 1)
- return JENT_EVARVAR;
+ if (time_backwards > 3) {
+ ret = JENT_ENOMONOTONIC;
+ goto out;
+ }
- /*
- * Ensure that we have variations in the time stamp below 10 for at
- * least 10% of all checks -- on some platforms, the counter increments
- * in multiples of 100, but not always
- */
- if ((TESTLOOPCOUNT/10 * 9) < count_mod)
- return JENT_ECOARSETIME;
+ /* Did we encounter a health test failure? */
+ health_test_result = jent_health_failure(ec);
+ if (health_test_result) {
+ ret = (health_test_result & JENT_RCT_FAILURE) ? JENT_ERCT :
+ JENT_EHEALTH;
+ goto out;
+ }
- /*
- * If we have more than 90% stuck results, then this Jitter RNG is
- * likely to not work well.
- */
- if ((TESTLOOPCOUNT/10 * 9) < count_stuck)
- return JENT_ESTUCK;
+out:
+ if (ec_free)
+ jent_entropy_collector_free(ec);
- return 0;
+ return ret;
}
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
index b7397b617ef0..4c5dbf2a8d8f 100644
--- a/crypto/jitterentropy.h
+++ b/crypto/jitterentropy.h
@@ -1,16 +1,32 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+extern void *jent_kvzalloc(unsigned int len);
+extern void jent_kvzfree(void *ptr, unsigned int len);
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
-extern void jent_panic(char *s);
-extern void jent_memcpy(void *dest, const void *src, unsigned int n);
extern void jent_get_nstime(__u64 *out);
+extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+ unsigned int addtl_len, __u64 hash_loop_cnt,
+ unsigned int stuck);
+int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len);
struct rand_data;
-extern int jent_entropy_init(void);
+extern int jent_entropy_init(unsigned int osr, unsigned int flags,
+ void *hash_state, struct rand_data *p_ec);
extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len);
extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
- unsigned int flags);
+ unsigned int flags,
+ void *hash_state);
extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
+
+#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE
+int jent_raw_hires_entropy_store(__u64 value);
+void jent_testing_init(void);
+void jent_testing_exit(void);
+#else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
+static inline int jent_raw_hires_entropy_store(__u64 value) { return 0; }
+static inline void jent_testing_init(void) { }
+static inline void jent_testing_exit(void) { }
+#endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c
index 58edf7797abf..b7a6bf9da773 100644
--- a/crypto/kdf_sp800108.c
+++ b/crypto/kdf_sp800108.c
@@ -125,9 +125,13 @@ static const struct kdf_testvec kdf_ctr_hmac_sha256_tv_template[] = {
static int __init crypto_kdf108_init(void)
{
- int ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
- crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
+ return 0;
+
+ ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
+ crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
if (ret) {
if (fips_enabled)
panic("alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
@@ -136,7 +140,7 @@ static int __init crypto_kdf108_init(void)
WARN(1,
"alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
ret);
- } else {
+ } else if (fips_enabled) {
pr_info("alg: self-tests for CTR-KDF (hmac(sha256)) passed\n");
}
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
deleted file mode 100644
index 054d9a216fc9..000000000000
--- a/crypto/keywrap.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Key Wrapping: RFC3394 / NIST SP800-38F
- *
- * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL2
- * are required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * Note for using key wrapping:
- *
- * * The result of the encryption operation is the ciphertext starting
- * with the 2nd semiblock. The first semiblock is provided as the IV.
- * The IV used to start the encryption operation is the default IV.
- *
- * * The input for the decryption is the first semiblock handed in as an
- * IV. The ciphertext is the data starting with the 2nd semiblock. The
- * return code of the decryption operation will be EBADMSG in case an
- * integrity error occurs.
- *
- * To obtain the full result of an encryption as expected by SP800-38F, the
- * caller must allocate a buffer of plaintext + 8 bytes:
- *
- * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
- * u8 data[datalen];
- * u8 *iv = data;
- * u8 *pt = data + crypto_skcipher_ivsize(tfm);
- * <ensure that pt contains the plaintext of size ptlen>
- * sg_init_one(&sg, pt, ptlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
- *
- * ==> After encryption, data now contains full KW result as per SP800-38F.
- *
- * In case of decryption, ciphertext now already has the expected length
- * and must be segmented appropriately:
- *
- * unsigned int datalen = CTLEN;
- * u8 data[datalen];
- * <ensure that data contains full ciphertext>
- * u8 *iv = data;
- * u8 *ct = data + crypto_skcipher_ivsize(tfm);
- * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
- * sg_init_one(&sg, ct, ctlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
- *
- * ==> After decryption (which hopefully does not return EBADMSG), the ct
- * pointer now points to the plaintext of size ctlen.
- *
- * Note 2: KWP is not implemented as this would defy in-place operation.
- * If somebody wants to wrap non-aligned data, he should simply pad
- * the input with zeros to fill it up to the 8 byte boundary.
- */
-
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-
-struct crypto_kw_block {
-#define SEMIBSIZE 8
- __be64 A;
- __be64 R;
-};
-
-/*
- * Fast forward the SGL to the "end" length minus SEMIBSIZE.
- * The start in the SGL defined by the fast-forward is returned with
- * the walk variable
- */
-static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
- struct scatterlist *sg,
- unsigned int end)
-{
- unsigned int skip = 0;
-
- /* The caller should only operate on full SEMIBLOCKs. */
- BUG_ON(end < SEMIBSIZE);
-
- skip = end - SEMIBSIZE;
- while (sg) {
- if (sg->length > skip) {
- scatterwalk_start(walk, sg);
- scatterwalk_advance(walk, skip);
- break;
- }
-
- skip -= sg->length;
- sg = sg_next(sg);
- }
-}
-
-static int crypto_kw_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- struct crypto_kw_block block;
- struct scatterlist *src, *dst;
- u64 t = 6 * ((req->cryptlen) >> 3);
- unsigned int i;
- int ret = 0;
-
- /*
- * Require at least 2 semiblocks (note, the 3rd semiblock that is
- * required by SP800-38F is the IV.
- */
- if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
- return -EINVAL;
-
- /* Place the IV into block A */
- memcpy(&block.A, req->iv, SEMIBSIZE);
-
- /*
- * src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, src points to req->src and dst to req->dst. For any
- * subsequent round, the code operates on req->dst only.
- */
- src = req->src;
- dst = req->dst;
-
- for (i = 0; i < 6; i++) {
- struct scatter_walk src_walk, dst_walk;
- unsigned int nbytes = req->cryptlen;
-
- while (nbytes) {
- /* move pointer by nbytes in the SGL */
- crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
- /* get the source block */
- scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
- false);
-
- /* perform KW operation: modify IV with counter */
- block.A ^= cpu_to_be64(t);
- t--;
- /* perform KW operation: decrypt block */
- crypto_cipher_decrypt_one(cipher, (u8 *)&block,
- (u8 *)&block);
-
- /* move pointer by nbytes in the SGL */
- crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
- /* Copy block->R into place */
- scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
- true);
-
- nbytes -= SEMIBSIZE;
- }
-
- /* we now start to operate on the dst SGL only */
- src = req->dst;
- dst = req->dst;
- }
-
- /* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
- ret = -EBADMSG;
-
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
-
- return ret;
-}
-
-static int crypto_kw_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- struct crypto_kw_block block;
- struct scatterlist *src, *dst;
- u64 t = 1;
- unsigned int i;
-
-	/*
-	 * Require at least 2 semiblocks (note: the 3rd semiblock required by
-	 * SP800-38F is the IV, which occupies the first semiblock). This means
-	 * that the dst memory must be one semiblock larger than src. Also
-	 * ensure that the given data is aligned to the semiblock size.
-	 */
- if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
- return -EINVAL;
-
-	/*
-	 * Place the predefined IV into block A -- for encryption, the caller
-	 * does not need to provide an IV, but must fetch the final IV.
-	 */
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
-
- /*
- * src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, src points to req->src and dst to req->dst. For any
- * subsequent round, the code operates on req->dst only.
- */
- src = req->src;
- dst = req->dst;
-
- for (i = 0; i < 6; i++) {
- struct scatter_walk src_walk, dst_walk;
- unsigned int nbytes = req->cryptlen;
-
- scatterwalk_start(&src_walk, src);
- scatterwalk_start(&dst_walk, dst);
-
- while (nbytes) {
- /* get the source block */
- scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
- false);
-
- /* perform KW operation: encrypt block */
- crypto_cipher_encrypt_one(cipher, (u8 *)&block,
- (u8 *)&block);
- /* perform KW operation: modify IV with counter */
- block.A ^= cpu_to_be64(t);
- t++;
-
- /* Copy block->R into place */
- scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
- true);
-
- nbytes -= SEMIBSIZE;
- }
-
- /* we now start to operate on the dst SGL only */
- src = req->dst;
- dst = req->dst;
- }
-
- /* establish the IV for the caller to pick up */
- memcpy(req->iv, &block.A, SEMIBSIZE);
-
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
-
- return 0;
-}
-
-static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
- int err;
-
- inst = skcipher_alloc_instance_simple(tmpl, tb);
- if (IS_ERR(inst))
- return PTR_ERR(inst);
-
- alg = skcipher_ialg_simple(inst);
-
- err = -EINVAL;
- /* Section 5.1 requirement for KW */
- if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
- goto out_free_inst;
-
- inst->alg.base.cra_blocksize = SEMIBSIZE;
- inst->alg.base.cra_alignmask = 0;
- inst->alg.ivsize = SEMIBSIZE;
-
- inst->alg.encrypt = crypto_kw_encrypt;
- inst->alg.decrypt = crypto_kw_decrypt;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err) {
-out_free_inst:
- inst->free(inst);
- }
-
- return err;
-}
-
-static struct crypto_template crypto_kw_tmpl = {
- .name = "kw",
- .create = crypto_kw_create,
- .module = THIS_MODULE,
-};
-
-static int __init crypto_kw_init(void)
-{
- return crypto_register_template(&crypto_kw_tmpl);
-}
-
-static void __exit crypto_kw_exit(void)
-{
- crypto_unregister_template(&crypto_kw_tmpl);
-}
-
-subsys_initcall(crypto_kw_init);
-module_exit(crypto_kw_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
-MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
-MODULE_ALIAS_CRYPTO("kw");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/khazad.c b/crypto/khazad.c
index f19339954c89..024264ee9cd1 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -19,11 +19,11 @@
*
*/
+#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
-#include <linux/crypto.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
@@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
- /* key is supposed to be 32-bit aligned */
- K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
- K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
+ K2 = get_unaligned_be64(&in_key[0]);
+ K1 = get_unaligned_be64(&in_key[8]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
- u8 *ciphertext, const u8 *plaintext)
+ u8 *dst, const u8 *src)
{
- const __be64 *src = (const __be64 *)plaintext;
- __be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
- state = be64_to_cpu(*src) ^ roundKey[0];
+ state = get_unaligned_be64(src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
@@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
- *dst = cpu_to_be64(state);
+ put_unaligned_be64(state, dst);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
- .cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
@@ -876,7 +871,7 @@ static void __exit khazad_mod_fini(void)
}
-subsys_initcall(khazad_mod_init);
+module_init(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 678e871ce418..2e0cefe7a25f 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -5,23 +5,20 @@
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*/
+
+#include <crypto/internal/kpp.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/kpp.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_kpp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
@@ -31,12 +28,6 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
-#else
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -82,11 +73,14 @@ static const struct crypto_type crypto_kpp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_kpp_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
.tfmsize = offsetof(struct crypto_kpp, base),
+ .algsize = offsetof(struct kpp_alg, base),
};
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/krb5/Kconfig b/crypto/krb5/Kconfig
new file mode 100644
index 000000000000..4d0476e13f3c
--- /dev/null
+++ b/crypto/krb5/Kconfig
@@ -0,0 +1,26 @@
+config CRYPTO_KRB5
+ tristate "Kerberos 5 crypto"
+ select CRYPTO_MANAGER
+ select CRYPTO_KRB5ENC
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH_INFO
+ select CRYPTO_HMAC
+ select CRYPTO_CMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_CBC
+ select CRYPTO_CTS
+ select CRYPTO_AES
+ select CRYPTO_CAMELLIA
+ help
+	  Provide a library of Kerberos-5-based crypto routines. This is
+	  intended for use by network filesystems.
+
+config CRYPTO_KRB5_SELFTESTS
+ bool "Kerberos 5 crypto selftests"
+ depends on CRYPTO_KRB5
+ help
+	  Turn on self-testing for the Kerberos 5 crypto functions. The tests
+	  are performed on module load, or at boot if built in.
diff --git a/crypto/krb5/Makefile b/crypto/krb5/Makefile
new file mode 100644
index 000000000000..d38890c0b247
--- /dev/null
+++ b/crypto/krb5/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Kerberos 5 crypto library
+#
+
+krb5-y += \
+ krb5_kdf.o \
+ krb5_api.o \
+ rfc3961_simplified.o \
+ rfc3962_aes.o \
+ rfc6803_camellia.o \
+ rfc8009_aes2.o
+
+krb5-$(CONFIG_CRYPTO_KRB5_SELFTESTS) += \
+ selftest.o \
+ selftest_data.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5.o
diff --git a/crypto/krb5/internal.h b/crypto/krb5/internal.h
new file mode 100644
index 000000000000..a59084ffafe8
--- /dev/null
+++ b/crypto/krb5/internal.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos5 crypto internals
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/krb5.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+
+/*
+ * Profile used for key derivation and encryption.
+ */
+struct krb5_crypto_profile {
+ /* Pseudo-random function */
+ int (*calc_PRF)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp);
+
+ /* Checksum key derivation */
+ int (*calc_Kc)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Kc,
+ gfp_t gfp);
+
+ /* Encryption key derivation */
+ int (*calc_Ke)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Ke,
+ gfp_t gfp);
+
+ /* Integrity key derivation */
+ int (*calc_Ki)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Ki,
+ gfp_t gfp);
+
+ /* Derive the keys needed for an encryption AEAD object. */
+ int (*derive_encrypt_keys)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Directly load the keys needed for an encryption AEAD object. */
+ int (*load_encrypt_keys)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Derive the key needed for a checksum hash object. */
+ int (*derive_checksum_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Directly load the keys needed for a checksum hash object. */
+ int (*load_checksum_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Encrypt data in-place, inserting confounder and checksum. */
+ ssize_t (*encrypt)(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+
+ /* Decrypt data in-place, removing confounder and checksum */
+ int (*decrypt)(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+ /* Generate a MIC on part of a packet, inserting the checksum */
+ ssize_t (*get_mic)(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len);
+
+ /* Verify the MIC on a piece of data, removing the checksum */
+ int (*verify_mic)(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+};
+
+/*
+ * Crypto size/alignment rounding convenience macros.
+ */
+#define crypto_roundup(X) ((unsigned int)round_up((X), CRYPTO_MINALIGN))
+
+#define krb5_aead_size(TFM) \
+ crypto_roundup(sizeof(struct aead_request) + crypto_aead_reqsize(TFM))
+#define krb5_aead_ivsize(TFM) \
+ crypto_roundup(crypto_aead_ivsize(TFM))
+#define krb5_shash_size(TFM) \
+ crypto_roundup(sizeof(struct shash_desc) + crypto_shash_descsize(TFM))
+#define krb5_digest_size(TFM) \
+ crypto_roundup(crypto_shash_digestsize(TFM))
+#define round16(x) (((x) + 15) & ~15)
+
+/*
+ * Self-testing data.
+ */
+struct krb5_prf_test {
+ u32 etype;
+ const char *name, *key, *octet, *prf;
+};
+
+struct krb5_key_test_one {
+ u32 use;
+ const char *key;
+};
+
+struct krb5_key_test {
+ u32 etype;
+ const char *name, *key;
+ struct krb5_key_test_one Kc, Ke, Ki;
+};
+
+struct krb5_enc_test {
+ u32 etype;
+ u32 usage;
+ const char *name, *plain, *conf, *K0, *Ke, *Ki, *ct;
+};
+
+struct krb5_mic_test {
+ u32 etype;
+ u32 usage;
+ const char *name, *plain, *K0, *Kc, *mic;
+};
+
+/*
+ * krb5_api.c
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *keys,
+ gfp_t gfp);
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ gfp_t gfp);
+
+/*
+ * krb5_kdf.c
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+
+/*
+ * rfc3961_simplified.c
+ */
+extern const struct krb5_crypto_profile rfc3961_simplified_profile;
+
+int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
+ size_t offset, size_t len);
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int rfc3961_load_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+int krb5_aead_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len);
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+/*
+ * rfc3962_aes.c
+ */
+extern const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96;
+extern const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96;
+
+/*
+ * rfc6803_camellia.c
+ */
+extern const struct krb5_enctype krb5_camellia128_cts_cmac;
+extern const struct krb5_enctype krb5_camellia256_cts_cmac;
+
+/*
+ * rfc8009_aes2.c
+ */
+extern const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128;
+extern const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192;
+
+/*
+ * selftest.c
+ */
+#ifdef CONFIG_CRYPTO_KRB5_SELFTESTS
+int krb5_selftest(void);
+#else
+static inline int krb5_selftest(void) { return 0; }
+#endif
+
+/*
+ * selftest_data.c
+ */
+extern const struct krb5_prf_test krb5_prf_tests[];
+extern const struct krb5_key_test krb5_key_tests[];
+extern const struct krb5_enc_test krb5_enc_tests[];
+extern const struct krb5_mic_test krb5_mic_tests[];
diff --git a/crypto/krb5/krb5_api.c b/crypto/krb5/krb5_api.c
new file mode 100644
index 000000000000..23026d4206c8
--- /dev/null
+++ b/crypto/krb5/krb5_api.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos 5 crypto library.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include "internal.h"
+
+MODULE_DESCRIPTION("Kerberos 5 crypto");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+static const struct krb5_enctype *const krb5_supported_enctypes[] = {
+ &krb5_aes128_cts_hmac_sha1_96,
+ &krb5_aes256_cts_hmac_sha1_96,
+ &krb5_aes128_cts_hmac_sha256_128,
+ &krb5_aes256_cts_hmac_sha384_192,
+ &krb5_camellia128_cts_cmac,
+ &krb5_camellia256_cts_cmac,
+};
+
+/**
+ * crypto_krb5_find_enctype - Find the handler for a Kerberos5 encryption type
+ * @enctype: The standard Kerberos encryption type number
+ *
+ * Look up a Kerberos encryption type by number. If successful, returns a
+ * pointer to the type tables; returns NULL otherwise.
+ */
+const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype)
+{
+ const struct krb5_enctype *krb5;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(krb5_supported_enctypes); i++) {
+ krb5 = krb5_supported_enctypes[i];
+ if (krb5->etype == enctype)
+ return krb5;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(crypto_krb5_find_enctype);
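+
+/*
+ * Example (illustrative only, not part of the API contract): look up the
+ * handler for aes256-cts-hmac-sha1-96 by its standard encryption type
+ * number (18 in the IANA Kerberos registry):
+ *
+ *	const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(18);
+ *
+ *	if (!krb5)
+ *		return -ENOPKG;
+ */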
+
+/**
+ * crypto_krb5_how_much_buffer - Work out how much buffer is required for an amount of data
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @data_size: How much data we want to allow for
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much buffer space is required to wrap a given amount of data.
+ * This allows for a confounder, padding and checksum as appropriate. The
+ * amount of buffer required is returned and the offset into the buffer at
+ * which the data will start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t data_size, size_t *_offset)
+{
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ *_offset = krb5->cksum_len;
+ return krb5->cksum_len + data_size;
+
+ case KRB5_ENCRYPT_MODE:
+ *_offset = krb5->conf_len;
+ return krb5->conf_len + data_size + krb5->cksum_len;
+
+ default:
+ WARN_ON(1);
+ *_offset = 0;
+ return 0;
+ }
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_buffer);
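+
+/*
+ * Worked example (illustrative): for an enctype with a 16-byte confounder
+ * and a 12-byte checksum (e.g. aes128-cts-hmac-sha1-96), wrapping 100
+ * bytes of data in encrypt mode requires a buffer of
+ * 16 + 100 + 12 == 128 bytes, with *_offset set to 16 so that the data
+ * sits just after the confounder.
+ */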
+
+/**
+ * crypto_krb5_how_much_data - Work out how much data can fit in an amount of buffer
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @_buffer_size: How much buffer we want to allow for
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much data can be fitted into a given amount of buffer. This
+ * allows for a confounder, padding and checksum as appropriate. The amount
+ * of data that will fit is returned and the offset into the buffer at which
+ * the data will start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_buffer_size, size_t *_offset)
+{
+ size_t buffer_size = *_buffer_size, data_size;
+
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ if (WARN_ON(buffer_size < krb5->cksum_len + 1))
+ goto bad;
+ *_offset = krb5->cksum_len;
+ return buffer_size - krb5->cksum_len;
+
+ case KRB5_ENCRYPT_MODE:
+ if (WARN_ON(buffer_size < krb5->conf_len + 1 + krb5->cksum_len))
+ goto bad;
+ data_size = buffer_size - krb5->cksum_len;
+ *_offset = krb5->conf_len;
+ return data_size - krb5->conf_len;
+
+ default:
+ WARN_ON(1);
+ goto bad;
+ }
+
+bad:
+ *_offset = 0;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_data);
+
+/**
+ * crypto_krb5_where_is_the_data - Find the data in a decrypted message
+ * @krb5: The encoding to use.
+ * @mode: Mode of operation
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Find the offset and size of the data in a secure message so that this
+ * information can be used in the metadata buffer which will get added to the
+ * digest by crypto_krb5_verify_mic().
+ */
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_offset, size_t *_len)
+{
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ *_offset += krb5->cksum_len;
+ *_len -= krb5->cksum_len;
+ return;
+ case KRB5_ENCRYPT_MODE:
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ return;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+EXPORT_SYMBOL(crypto_krb5_where_is_the_data);
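+
+/*
+ * Worked example (illustrative): with conf_len == 16 and cksum_len == 12,
+ * a decrypted blob at offset 0 of length 128 yields *_offset == 16 and
+ * *_len == 100 in encrypt mode, i.e. the 100 data bytes between the
+ * confounder and the checksum.
+ */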
+
+/*
+ * Prepare the encryption with derived key data.
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *keys,
+ gfp_t gfp)
+{
+ struct crypto_aead *ci = NULL;
+ int ret = -ENOMEM;
+
+ ci = crypto_alloc_aead(krb5->encrypt_name, 0, 0);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ if (ret == -ENOENT)
+ ret = -ENOPKG;
+ goto err;
+ }
+
+ ret = crypto_aead_setkey(ci, keys->data, keys->len);
+ if (ret < 0) {
+ pr_err("Couldn't set AEAD key %s: %d\n", krb5->encrypt_name, ret);
+ goto err_ci;
+ }
+
+ ret = crypto_aead_setauthsize(ci, krb5->cksum_len);
+ if (ret < 0) {
+ pr_err("Couldn't set AEAD authsize %s: %d\n", krb5->encrypt_name, ret);
+ goto err_ci;
+ }
+
+ return ci;
+err_ci:
+ crypto_free_aead(ci);
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_encryption - Prepare AEAD crypto object for encryption-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it, set
+ * its parameters and return the crypto handle to it. This can then be used
+ * to dispatch encrypt and decrypt operations.
+ */
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp)
+{
+ struct crypto_aead *ci = NULL;
+ struct krb5_buffer keys = {};
+ int ret;
+
+ ret = krb5->profile->derive_encrypt_keys(krb5, TK, usage, &keys, gfp);
+ if (ret < 0)
+ goto err;
+
+ ci = krb5_prepare_encryption(krb5, &keys, gfp);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ goto err;
+ }
+
+ kfree(keys.data);
+ return ci;
+err:
+ kfree(keys.data);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_encryption);
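+
+/*
+ * Usage sketch (illustrative; the transport key and usage number are
+ * caller-specific):
+ *
+ *	struct crypto_aead *aead;
+ *
+ *	aead = crypto_krb5_prepare_encryption(krb5, &transport_key,
+ *					      usage, GFP_KERNEL);
+ *	if (IS_ERR(aead))
+ *		return PTR_ERR(aead);
+ *	...
+ *	crypto_free_aead(aead);
+ */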
+
+/*
+ * Prepare the checksum with derived key data.
+ */
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ gfp_t gfp)
+{
+ struct crypto_shash *ci = NULL;
+ int ret = -ENOMEM;
+
+ ci = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ if (ret == -ENOENT)
+ ret = -ENOPKG;
+ goto err;
+ }
+
+ ret = crypto_shash_setkey(ci, Kc->data, Kc->len);
+ if (ret < 0) {
+ pr_err("Couldn't set shash key %s: %d\n", krb5->cksum_name, ret);
+ goto err_ci;
+ }
+
+ return ci;
+err_ci:
+ crypto_free_shash(ci);
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_checksum - Prepare a hash crypto object for checksum-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it, set
+ * its parameters and return the crypto handle to it. This can then be used
+ * to dispatch get_mic and verify_mic operations.
+ */
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp)
+{
+ struct crypto_shash *ci = NULL;
+ struct krb5_buffer keys = {};
+ int ret;
+
+ ret = krb5->profile->derive_checksum_key(krb5, TK, usage, &keys, gfp);
+ if (ret < 0) {
+ pr_err("get_Kc failed %d\n", ret);
+ goto err;
+ }
+
+ ci = krb5_prepare_checksum(krb5, &keys, gfp);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ goto err;
+ }
+
+ kfree(keys.data);
+ return ci;
+err:
+ kfree(keys.data);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_checksum);
+
+/**
+ * crypto_krb5_encrypt - Apply Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ * @preconfounded: True if the confounder is already inserted.
+ *
+ * Using the specified Kerberos encoding, insert a confounder and padding as
+ * needed, encrypt this and the data in place and insert an integrity checksum
+ * into the buffer.
+ *
+ * The buffer must include space for the confounder, the checksum and any
+ * padding required. The caller can preinsert the confounder into the buffer
+ * (for testing, for example).
+ *
+ * The resulting secured blob may be less than the size of the buffer.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the confounder
+ * is too short or the data is misaligned. Other errors may also be returned
+ * from the crypto layer.
+ */
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ if (WARN_ON(data_offset > sg_len ||
+ data_len > sg_len ||
+ data_offset > sg_len - data_len))
+ return -EMSGSIZE;
+ return krb5->profile->encrypt(krb5, aead, sg, nr_sg, sg_len,
+ data_offset, data_len, preconfounded);
+}
+EXPORT_SYMBOL(crypto_krb5_encrypt);
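+
+/*
+ * Usage sketch (illustrative, single contiguous buffer): size the buffer
+ * with crypto_krb5_how_much_buffer(), copy the plaintext to the returned
+ * offset and then secure it in place:
+ *
+ *	struct scatterlist sg[1];
+ *	size_t offset;
+ *	ssize_t secured;
+ *	size_t buf_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
+ *						     data_len, &offset);
+ *
+ *	memcpy(buffer + offset, data, data_len);
+ *	sg_init_one(sg, buffer, buf_len);
+ *	secured = crypto_krb5_encrypt(krb5, aead, sg, 1, buf_len,
+ *				      offset, data_len, false);
+ *	if (secured < 0)
+ *		return secured;
+ */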
+
+/**
+ * crypto_krb5_decrypt - Validate and remove Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum and decrypt the secure region, stripping off the confounder.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data plus the trailing padding are stored. The caller is responsible
+ * for working out how much padding there is and removing it.
+ *
+ * Returns 0 if successful or -ENOMEM on an allocation failure; returns
+ * -EPROTO if the data cannot be parsed or -EBADMSG if the integrity
+ * checksum doesn't match. Other errors may also be returned from the
+ * crypto layer.
+ */
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ return krb5->profile->decrypt(krb5, aead, sg, nr_sg, _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_decrypt);
+
+/**
+ * crypto_krb5_get_mic - Apply Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ *
+ * Using the specified Kerberos encoding, calculate and insert an integrity
+ * checksum into the buffer.
+ *
+ * The buffer must include space for the checksum at the front.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the gap for
+ * the checksum is too short. Other errors may also be returned from the
+ * crypto layer.
+ */
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len)
+{
+ if (WARN_ON(data_offset > sg_len ||
+ data_len > sg_len ||
+ data_offset > sg_len - data_len))
+ return -EMSGSIZE;
+ return krb5->profile->get_mic(krb5, shash, metadata, sg, nr_sg, sg_len,
+ data_offset, data_len);
+}
+EXPORT_SYMBOL(crypto_krb5_get_mic);
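+
+/*
+ * Usage sketch (illustrative): reserve cksum_len bytes at the front of the
+ * buffer via crypto_krb5_how_much_buffer(), place the data after that gap
+ * and let crypto_krb5_get_mic() fill the checksum in:
+ *
+ *	size_t offset;
+ *	size_t buf_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE,
+ *						     data_len, &offset);
+ *
+ *	sg_init_one(sg, buffer, buf_len);
+ *	ret = crypto_krb5_get_mic(krb5, shash, NULL, sg, 1, buf_len,
+ *				  offset, data_len);
+ */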
+
+/**
+ * crypto_krb5_verify_mic - Validate and remove Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data is stored.
+ *
+ * Returns 0 if successful or -ENOMEM on an allocation failure; returns
+ * -EPROTO if the data cannot be parsed or -EBADMSG if the checksum
+ * doesn't match. Other errors may also be returned from the crypto
+ * layer.
+ */
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ return krb5->profile->verify_mic(krb5, shash, metadata, sg, nr_sg,
+ _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_verify_mic);
+
+static int __init crypto_krb5_init(void)
+{
+ return krb5_selftest();
+}
+module_init(crypto_krb5_init);
+
+static void __exit crypto_krb5_exit(void)
+{
+}
+module_exit(crypto_krb5_exit);
diff --git a/crypto/krb5/krb5_kdf.c b/crypto/krb5/krb5_kdf.c
new file mode 100644
index 000000000000..6699e5469d1b
--- /dev/null
+++ b/crypto/krb5/krb5_kdf.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos key derivation.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/**
+ * crypto_krb5_calc_PRFplus - Calculate PRF+ [RFC4402]
+ * @krb5: The encryption type to use
+ * @K: The protocol key for the pseudo-random function
+ * @L: The length of the output
+ * @S: The input octet string
+ * @result: Result buffer, sized to krb5->prf_len
+ * @gfp: Allocation restrictions
+ *
+ * Calculate the Kerberos pseudo-random function PRF+() by the following
+ * method:
+ *
+ * PRF+(K, L, S) = truncate(L, T1 || T2 || .. || Tn)
+ * Tn = PRF(K, n || S)
+ * [rfc4402 sec 2]
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *K,
+ unsigned int L,
+ const struct krb5_buffer *S,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct krb5_buffer T_series, Tn, n_S;
+ void *buffer;
+ int ret, n = 1;
+
+ Tn.len = krb5->prf_len;
+ T_series.len = 0;
+ n_S.len = 4 + S->len;
+
+ buffer = kzalloc(round16(L + Tn.len) + round16(n_S.len), gfp);
+ if (!buffer)
+ return -ENOMEM;
+
+ T_series.data = buffer;
+ n_S.data = buffer + round16(L + Tn.len);
+ memcpy(n_S.data + 4, S->data, S->len);
+
+ while (T_series.len < L) {
+ *(__be32 *)(n_S.data) = htonl(n);
+ Tn.data = T_series.data + Tn.len * (n - 1);
+ ret = krb5->profile->calc_PRF(krb5, K, &n_S, &Tn, gfp);
+ if (ret < 0)
+ goto err;
+ T_series.len += Tn.len;
+ n++;
+ }
+
+ /* Truncate to L */
+ memcpy(result->data, T_series.data, L);
+ ret = 0;
+
+err:
+ kfree_sensitive(buffer);
+ return ret;
+}
+EXPORT_SYMBOL(crypto_krb5_calc_PRFplus);
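+
+/*
+ * Worked example (illustrative): with prf_len == 16 and L == 32, the loop
+ * above performs two iterations:
+ *
+ *	T1 = PRF(K, 00000001 || S)
+ *	T2 = PRF(K, 00000002 || S)
+ *	PRF+(K, 32, S) = T1 || T2
+ *
+ * For L == 20, the same two blocks would be computed and the concatenation
+ * truncated to the first 20 octets.
+ */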
+
+/**
+ * krb5_derive_Kc - Derive the checksum key Kc
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Kc checksumming key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_CHECKSUM;
+
+ key->len = krb5->Kc_len;
+ return krb5->profile->calc_Kc(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ke - Derive the encryption key Ke
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ke encryption key. The key is stored into the prepared
+ * buffer.
+ */
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_ENCRYPTION;
+
+ key->len = krb5->Ke_len;
+ return krb5->profile->calc_Ke(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ki - Derive the integrity key Ki
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ki integrity checksum key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_INTEGRITY;
+
+ key->len = krb5->Ki_len;
+ return krb5->profile->calc_Ki(krb5, TK, &usage_constant, key, gfp);
+}
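+
+/*
+ * The usage constant built by each of the functions above is the 5-octet
+ * string from RFC 3961 sec 5.3: the 32-bit usage number in big-endian
+ * order followed by a single seed octet. For example (illustrative),
+ * usage 1026 with the checksum seed yields the constant
+ * 00 00 04 02 || KEY_USAGE_SEED_CHECKSUM.
+ */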
diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c
new file mode 100644
index 000000000000..e49cbdec7c40
--- /dev/null
+++ b/crypto/krb5/rfc3961_simplified.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3961 Kerberos 5 simplified crypto profile.
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/lcm.h>
+#include <linux/rtnetlink.h>
+#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/* Maximum blocksize for the supported crypto algorithms */
+#define KRB5_MAX_BLOCKSIZE (16)
+
+int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
+ size_t offset, size_t len)
+{
+ struct sg_mapping_iter miter;
+ size_t i, n;
+ int ret = 0;
+
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_FROM_SG | SG_MITER_LOCAL);
+ sg_miter_skip(&miter, offset);
+ for (i = 0; i < len; i += n) {
+ sg_miter_next(&miter);
+ n = min(miter.length, len - i);
+ ret = crypto_shash_update(desc, miter.addr, n);
+ if (ret < 0)
+ break;
+ }
+ sg_miter_stop(&miter);
+ return ret;
+}
+
+static int rfc3961_do_encrypt(struct crypto_sync_skcipher *tfm, void *iv,
+ const struct krb5_buffer *in, struct krb5_buffer *out)
+{
+ struct scatterlist sg[1];
+ u8 local_iv[KRB5_MAX_BLOCKSIZE] __aligned(KRB5_MAX_BLOCKSIZE) = {0};
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ int ret;
+
+ if (WARN_ON(in->len != out->len))
+ return -EINVAL;
+ if (out->len % crypto_sync_skcipher_blocksize(tfm) != 0)
+ return -EINVAL;
+
+ if (crypto_sync_skcipher_ivsize(tfm) > KRB5_MAX_BLOCKSIZE)
+ return -EINVAL;
+
+ if (iv)
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
+
+ memcpy(out->data, in->data, out->len);
+ sg_init_one(sg, out->data, out->len);
+
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg, sg, out->len, local_iv);
+
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return ret;
+}
+
+/*
+ * Calculate an unkeyed basic hash.
+ */
+static int rfc3961_calc_H(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *data,
+ struct krb5_buffer *digest,
+ gfp_t gfp)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ size_t desc_size;
+ int ret = -ENOMEM;
+
+ tfm = crypto_alloc_shash(krb5->hash_name, 0, 0);
+ if (IS_ERR(tfm))
+ return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
+
+ desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+
+ desc = kzalloc(desc_size, gfp);
+ if (!desc)
+ goto error_tfm;
+
+ digest->len = crypto_shash_digestsize(tfm);
+ digest->data = kzalloc(digest->len, gfp);
+ if (!digest->data)
+ goto error_desc;
+
+ desc->tfm = tfm;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error_digest;
+
+ ret = crypto_shash_finup(desc, data->data, data->len, digest->data);
+ if (ret < 0)
+ goto error_digest;
+
+ goto error_desc;
+
+error_digest:
+ kfree_sensitive(digest->data);
+error_desc:
+ kfree_sensitive(desc);
+error_tfm:
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+/*
+ * This is the n-fold function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+static void rfc3961_nfold(const struct krb5_buffer *source, struct krb5_buffer *result)
+{
+ const u8 *in = source->data;
+ u8 *out = result->data;
+ unsigned long ulcm;
+ unsigned int inbits, outbits;
+ int byte, i, msbit;
+
+ /* the code below is more readable if I make these bytes instead of bits */
+ inbits = source->len;
+ outbits = result->len;
+
+ /* first compute lcm(n,k) */
+ ulcm = lcm(inbits, outbits);
+
+ /* now do the real work */
+ memset(out, 0, outbits);
+ byte = 0;
+
+ /* this will end up cycling through k lcm(k,n)/k times, which
+ * is correct.
+ */
+ for (i = ulcm-1; i >= 0; i--) {
+ /* compute the msbit in k which gets added into this byte */
+ msbit = (
+ /* first, start with the msbit in the first,
+ * unrotated byte
+ */
+ ((inbits << 3) - 1) +
+ /* then, for each byte, shift to the right
+ * for each repetition
+ */
+ (((inbits << 3) + 13) * (i/inbits)) +
+ /* last, pick out the correct byte within
+ * that shifted repetition
+ */
+ ((inbits - (i % inbits)) << 3)
+ ) % (inbits << 3);
+
+ /* pull out the byte value itself */
+ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) |
+ (in[((inbits) - (msbit >> 3)) % inbits]))
+ >> ((msbit & 7) + 1)) & 0xff;
+
+ /* do the addition */
+ byte += out[i % outbits];
+ out[i % outbits] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+
+ /* if there's a carry bit left over, add it back in */
+ if (byte) {
+ for (i = outbits - 1; i >= 0; i--) {
+ /* do the addition */
+ byte += out[i];
+ out[i] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+ }
+}
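+
+/*
+ * Sanity check (illustrative): when the input is already the target
+ * length, lcm(n, k) == n and the fold degenerates into a copy, so
+ * 64-fold("kerberos") is just the ASCII bytes 6b 65 72 62 65 72 6f 73,
+ * matching the rfc3961 appendix A.1 test vector.
+ */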
+
+/*
+ * Calculate a derived key, DK(Base Key, Well-Known Constant)
+ *
+ * DK(Key, Constant) = random-to-key(DR(Key, Constant))
+ * DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state))
+ * K1 = E(Key, n-fold(Constant), initial-cipher-state)
+ * K2 = E(Key, K1, initial-cipher-state)
+ * K3 = E(Key, K2, initial-cipher-state)
+ * K4 = ...
+ * DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...)
+ * [rfc3961 sec 5.1]
+ */
+static int rfc3961_calc_DK(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *inkey,
+ const struct krb5_buffer *in_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ unsigned int blocksize, keybytes, keylength, n;
+ struct krb5_buffer inblock, outblock, rawkey;
+ struct crypto_sync_skcipher *cipher;
+ int ret = -EINVAL;
+
+ blocksize = krb5->block_len;
+ keybytes = krb5->key_bytes;
+ keylength = krb5->key_len;
+
+ if (inkey->len != keylength || result->len != keylength)
+ return -EINVAL;
+ if (!krb5->random_to_key && result->len != keybytes)
+ return -EINVAL;
+
+ cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher);
+ goto err_return;
+ }
+ ret = crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len);
+ if (ret < 0)
+ goto err_free_cipher;
+
+ ret = -ENOMEM;
+ inblock.data = kzalloc(blocksize * 2 + keybytes, gfp);
+ if (!inblock.data)
+ goto err_free_cipher;
+
+ inblock.len = blocksize;
+ outblock.data = inblock.data + blocksize;
+ outblock.len = blocksize;
+ rawkey.data = outblock.data + blocksize;
+ rawkey.len = keybytes;
+
+ /* initialize the input block */
+
+ if (in_constant->len == inblock.len)
+ memcpy(inblock.data, in_constant->data, inblock.len);
+ else
+ rfc3961_nfold(in_constant, &inblock);
+
+ /* loop encrypting the blocks until enough key bytes are generated */
+ n = 0;
+ while (n < rawkey.len) {
+ rfc3961_do_encrypt(cipher, NULL, &inblock, &outblock);
+
+ if (keybytes - n <= outblock.len) {
+ memcpy(rawkey.data + n, outblock.data, keybytes - n);
+ break;
+ }
+
+ memcpy(rawkey.data + n, outblock.data, outblock.len);
+ memcpy(inblock.data, outblock.data, outblock.len);
+ n += outblock.len;
+ }
+
+ /* postprocess the key */
+ if (!krb5->random_to_key) {
+ /* Identity random-to-key function. */
+ memcpy(result->data, rawkey.data, rawkey.len);
+ ret = 0;
+ } else {
+ ret = krb5->random_to_key(krb5, &rawkey, result);
+ }
+
+ kfree_sensitive(inblock.data);
+err_free_cipher:
+ crypto_free_sync_skcipher(cipher);
+err_return:
+ return ret;
+}
+
+/*
+ * Calculate single encryption, E()
+ *
+ * E(Key, octets)
+ */
+static int rfc3961_calc_E(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *in_data,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_sync_skcipher *cipher;
+ int ret;
+
+ cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher);
+ goto err;
+ }
+
+ ret = crypto_sync_skcipher_setkey(cipher, key->data, key->len);
+ if (ret < 0)
+ goto err_free;
+
+ ret = rfc3961_do_encrypt(cipher, NULL, in_data, result);
+
+err_free:
+ crypto_free_sync_skcipher(cipher);
+err:
+ return ret;
+}
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * tmp1 = H(octet-string)
+ * tmp2 = truncate tmp1 to multiple of m
+ * PRF = E(DK(protocol-key, prfconstant), tmp2, initial-cipher-state)
+ *
+ * The "prfconstant" used in the PRF operation is the three-octet string
+ * "prf".
+ * [rfc3961 sec 5.3]
+ */
+static int rfc3961_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+ struct krb5_buffer derived_key;
+ struct krb5_buffer tmp1, tmp2;
+ unsigned int m = krb5->block_len;
+ void *buffer;
+ int ret;
+
+ if (result->len != krb5->prf_len)
+ return -EINVAL;
+
+ tmp1.len = krb5->hash_len;
+ derived_key.len = krb5->key_bytes;
+ buffer = kzalloc(round16(tmp1.len) + round16(derived_key.len), gfp);
+ if (!buffer)
+ return -ENOMEM;
+
+ tmp1.data = buffer;
+ derived_key.data = buffer + round16(tmp1.len);
+
+ ret = rfc3961_calc_H(krb5, octet_string, &tmp1, gfp);
+ if (ret < 0)
+ goto err;
+
+ tmp2.len = tmp1.len & ~(m - 1);
+ tmp2.data = tmp1.data;
+
+ ret = rfc3961_calc_DK(krb5, protocol_key, &prfconstant, &derived_key, gfp);
+ if (ret < 0)
+ goto err;
+
+ ret = rfc3961_calc_E(krb5, &derived_key, &tmp2, result, gfp);
+
+err:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Derive the Ke and Ki keys and package them into a key parameter that can
+ * be given to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ struct crypto_authenc_key_param *param;
+ struct krb5_buffer Ke, Ki;
+ struct rtattr *rta;
+ int ret;
+
+ Ke.len = krb5->Ke_len;
+ Ki.len = krb5->Ki_len;
+ setkey->len = RTA_LENGTH(sizeof(*param)) + Ke.len + Ki.len;
+ setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ rta = setkey->data;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->enckeylen = htonl(Ke.len);
+
+ Ki.data = (void *)(param + 1);
+ Ke.data = Ki.data + Ki.len;
+
+ ret = krb5_derive_Ke(krb5, TK, usage, &Ke, gfp);
+ if (ret < 0) {
+ pr_err("get_Ke failed %d\n", ret);
+ return ret;
+ }
+ ret = krb5_derive_Ki(krb5, TK, usage, &Ki, gfp);
+ if (ret < 0)
+ pr_err("get_Ki failed %d\n", ret);
+ return ret;
+}
+
+/*
+ * Package predefined Ke and Ki keys into a key parameter that can be given
+ * to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta;
+
+ setkey->len = RTA_LENGTH(sizeof(*param)) + Ke->len + Ki->len;
+ setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ rta = setkey->data;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->enckeylen = htonl(Ke->len);
+ memcpy((void *)(param + 1), Ki->data, Ki->len);
+ memcpy((void *)(param + 1) + Ki->len, Ke->data, Ke->len);
+ return 0;
+}
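+
+/*
+ * In both cases the resulting setkey blob has the layout that authenc()
+ * instances expect from their setkey operation:
+ *
+ *	[rtattr CRYPTO_AUTHENC_KEYA_PARAM: enckeylen = len(Ke)]
+ *	[Ki (authentication key)][Ke (encryption key)]
+ */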
+
+/*
+ * Derive the Kc key for checksum-only mode and package it into a key parameter
+ * that can be given to the setkey of a hash crypto object.
+ */
+int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ int ret;
+
+ setkey->len = krb5->Kc_len;
+ setkey->data = kzalloc(setkey->len, GFP_KERNEL);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ ret = krb5_derive_Kc(krb5, TK, usage, setkey, gfp);
+ if (ret < 0)
+ pr_err("get_Kc failed %d\n", ret);
+ return ret;
+}
+
+/*
+ * Package a predefined Kc key for checksum-only mode into a key parameter that
+ * can be given to the setkey of a hash crypto object.
+ */
+int rfc3961_load_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ setkey->len = krb5->Kc_len;
+ setkey->data = kmemdup(Kc->data, Kc->len, GFP_KERNEL);
+ if (!setkey->data)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * Apply encryption and checksumming functions to part of a scatterlist.
+ */
+ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ struct aead_request *req;
+ ssize_t ret, done;
+ size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset;
+ void *buffer;
+ u8 *iv;
+
+ if (WARN_ON(data_offset != krb5->conf_len))
+ return -EINVAL; /* Data is in wrong place */
+
+ secure_offset = 0;
+ base_len = krb5->conf_len + data_len;
+ pad_len = 0;
+ secure_len = base_len + pad_len;
+ cksum_offset = secure_len;
+ if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len))
+ return -EFAULT;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Insert the confounder into the buffer */
+ ret = -EFAULT;
+ if (!preconfounded) {
+ get_random_bytes(buffer, krb5->conf_len);
+ done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len,
+ secure_offset);
+ if (done != krb5->conf_len)
+ goto error;
+ }
+
+ /* We may need to pad out to the crypto blocksize. */
+ if (pad_len) {
+ done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len);
+ if (done != pad_len)
+ goto error;
+ }
+
+ /* Hash and encrypt the message. */
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_crypt(req, sg, sg, secure_len, iv);
+ ret = crypto_aead_encrypt(req);
+ if (ret < 0)
+ goto error;
+
+ ret = secure_len + krb5->cksum_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Apply decryption and checksumming functions to a message. The offset and
+ * length are updated to reflect the actual content of the encrypted region.
+ */
+int krb5_aead_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct aead_request *req;
+ size_t bsize;
+ void *buffer;
+ int ret;
+ u8 *iv;
+
+ if (WARN_ON(*_offset != 0))
+ return -EINVAL; /* Can't set offset on aead */
+
+ if (*_len < krb5->conf_len + krb5->cksum_len)
+ return -EPROTO;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Decrypt the message and verify its checksum. */
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_crypt(req, sg, sg, *_len, iv);
+ ret = crypto_aead_decrypt(req);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the boundaries of the data. */
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Generate a checksum over some metadata and part of a buffer described by
+ * a scatterlist and insert the MIC immediately prior to the data.
+ */
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len)
+{
+ struct shash_desc *desc;
+ ssize_t ret, done;
+ size_t bsize;
+ void *buffer, *digest;
+
+ if (WARN_ON(data_offset != krb5->cksum_len))
+ return -EMSGSIZE;
+
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Calculate the MIC with key Kc and store it into the skb */
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ if (metadata) {
+ ret = crypto_shash_update(desc, metadata->data, metadata->len);
+ if (ret < 0)
+ goto error;
+ }
+
+ ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+ if (ret < 0)
+ goto error;
+
+ digest = buffer + krb5_shash_size(shash);
+ ret = crypto_shash_final(desc, digest);
+ if (ret < 0)
+ goto error;
+
+ ret = -EFAULT;
+ done = sg_pcopy_from_buffer(sg, nr_sg, digest, krb5->cksum_len,
+ data_offset - krb5->cksum_len);
+ if (done != krb5->cksum_len)
+ goto error;
+
+ ret = krb5->cksum_len + data_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Check the MIC on a region of a buffer described by a scatterlist. The
+ * offset and length are updated to reflect the actual content of the secure
+ * region.
+ */
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct shash_desc *desc;
+ ssize_t done;
+ size_t bsize, data_offset, data_len, offset = *_offset, len = *_len;
+ void *buffer = NULL;
+ int ret;
+ u8 *cksum, *cksum2;
+
+ if (len < krb5->cksum_len)
+ return -EPROTO;
+ data_offset = offset + krb5->cksum_len;
+ data_len = len - krb5->cksum_len;
+
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ cksum = buffer +
+ krb5_shash_size(shash);
+ cksum2 = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+
+ /* Calculate the MIC */
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ if (metadata) {
+ ret = crypto_shash_update(desc, metadata->data, metadata->len);
+ if (ret < 0)
+ goto error;
+ }
+
+	ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+	if (ret < 0)
+		goto error;
+
+	ret = crypto_shash_final(desc, cksum);
+	if (ret < 0)
+		goto error;
+
+ ret = -EFAULT;
+ done = sg_pcopy_to_buffer(sg, nr_sg, cksum2, krb5->cksum_len, offset);
+ if (done != krb5->cksum_len)
+ goto error;
+
+ if (memcmp(cksum, cksum2, krb5->cksum_len) != 0) {
+ ret = -EBADMSG;
+ goto error;
+ }
+
+ *_offset += krb5->cksum_len;
+ *_len -= krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+const struct krb5_crypto_profile rfc3961_simplified_profile = {
+ .calc_PRF = rfc3961_calc_PRF,
+ .calc_Kc = rfc3961_calc_DK,
+ .calc_Ke = rfc3961_calc_DK,
+ .calc_Ki = rfc3961_calc_DK,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = krb5_aead_encrypt,
+ .decrypt = krb5_aead_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
diff --git a/crypto/krb5/rfc3962_aes.c b/crypto/krb5/rfc3962_aes.c
new file mode 100644
index 000000000000..5cbf8f4638b9
--- /dev/null
+++ b/crypto/krb5/rfc3962_aes.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3962 Advanced Encryption Standard (AES) Encryption for Kerberos 5
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "internal.h"
+
+const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96 = {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128,
+ .name = "aes128-cts-hmac-sha1-96",
+ .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha1)",
+ .hash_name = "sha1",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 12,
+ .hash_len = 20,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc3961_simplified_profile,
+};
+
+const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96 = {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256,
+ .name = "aes256-cts-hmac-sha1-96",
+ .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha1)",
+ .hash_name = "sha1",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 32,
+ .Ke_len = 32,
+ .Ki_len = 32,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 12,
+ .hash_len = 20,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc3961_simplified_profile,
+};
diff --git a/crypto/krb5/rfc6803_camellia.c b/crypto/krb5/rfc6803_camellia.c
new file mode 100644
index 000000000000..77cd4ce023f1
--- /dev/null
+++ b/crypto/krb5/rfc6803_camellia.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rfc6803 Camellia Encryption for Kerberos 5
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Calculate the key derivation function KDF-FEEDBACK_CMAC(key, constant)
+ *
+ * n = ceiling(k / 128)
+ * K(0) = zeros
+ * K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k)
+ * DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n))
+ * KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant))
+ *
+ * [rfc6803 sec 3]
+ */
+static int rfc6803_calc_KDF_FEEDBACK_CMAC(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_shash *shash;
+ struct krb5_buffer K, data;
+ struct shash_desc *desc;
+ __be32 tmp;
+ size_t bsize, offset, seg;
+ void *buffer;
+ u32 i = 0, k = result->len * 8;
+ u8 *p;
+ int ret = -ENOMEM;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+ ret = crypto_shash_setkey(shash, key->data, key->len);
+ if (ret < 0)
+ goto error_shash;
+
+ ret = -ENOMEM;
+ K.len = crypto_shash_digestsize(shash);
+ data.len = K.len + 4 + constant->len + 1 + 4;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(K.len) +
+ crypto_roundup(data.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto error_shash;
+
+ desc = buffer;
+ desc->tfm = shash;
+
+ K.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ data.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(K.len);
+
+ p = data.data + K.len + 4;
+ memcpy(p, constant->data, constant->len);
+ p += constant->len;
+ *p++ = 0x00;
+ tmp = htonl(k);
+ memcpy(p, &tmp, 4);
+ p += 4;
+
+ ret = -EINVAL;
+ if (WARN_ON(p - (u8 *)data.data != data.len))
+ goto error;
+
+ offset = 0;
+ do {
+ i++;
+ p = data.data;
+ memcpy(p, K.data, K.len);
+ p += K.len;
+ *(__be32 *)p = htonl(i);
+
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+ ret = crypto_shash_finup(desc, data.data, data.len, K.data);
+ if (ret < 0)
+ goto error;
+
+ seg = min_t(size_t, result->len - offset, K.len);
+ memcpy(result->data + offset, K.data, seg);
+ offset += seg;
+ } while (offset < result->len);
+
+error:
+ kfree_sensitive(buffer);
+error_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
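+
+/*
+ * Worked example of the feedback structure above (illustrative only): for a
+ * 256-bit Camellia key with a 128-bit CMAC, two rounds are needed, with i and
+ * k each encoded as 4-octet big-endian values and k = 256 = 0x00000100:
+ *
+ * K(1) = CMAC(key, zeros[16] | 0x00000001 | constant | 0x00 | 0x00000100)
+ * K(2) = CMAC(key, K(1) | 0x00000002 | constant | 0x00 | 0x00000100)
+ * DR = K(1) | K(2)
+ */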
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf")
+ * PRF = CMAC(Kp, octet-string)
+ * [rfc6803 sec 6]
+ */
+static int rfc6803_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+ struct crypto_shash *shash;
+ struct krb5_buffer Kp;
+ struct shash_desc *desc;
+ size_t bsize;
+ void *buffer;
+ int ret;
+
+ Kp.len = krb5->prf_len;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+
+ ret = -EINVAL;
+ if (result->len != crypto_shash_digestsize(shash))
+ goto out_shash;
+
+ ret = -ENOMEM;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(Kp.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto out_shash;
+
+ Kp.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+
+ ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, protocol_key, &prfconstant,
+ &Kp, gfp);
+ if (ret < 0)
+ goto out;
+
+ ret = crypto_shash_setkey(shash, Kp.data, Kp.len);
+ if (ret < 0)
+ goto out;
+
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto out;
+
+ ret = crypto_shash_finup(desc, octet_string->data, octet_string->len, result->data);
+ if (ret < 0)
+ goto out;
+
+out:
+ kfree_sensitive(buffer);
+out_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
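+
+/*
+ * Illustrative instantiation only: for camellia128-cts-cmac this computes
+ *
+ * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf") (16 bytes)
+ * PRF = CMAC-Camellia128(Kp, octet-string) (16 bytes)
+ */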
+
+static const struct krb5_crypto_profile rfc6803_crypto_profile = {
+ .calc_PRF = rfc6803_calc_PRF,
+ .calc_Kc = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .calc_Ke = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .calc_Ki = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = krb5_aead_encrypt,
+ .decrypt = krb5_aead_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
+
+const struct krb5_enctype krb5_camellia128_cts_cmac = {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA128,
+ .name = "camellia128-cts-cmac",
+ .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .cksum_name = "cmac(camellia)",
+ .hash_name = NULL,
+ .derivation_enc = "cts(cbc(camellia))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 16,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc6803_crypto_profile,
+};
+
+const struct krb5_enctype krb5_camellia256_cts_cmac = {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA256,
+ .name = "camellia256-cts-cmac",
+ .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .cksum_name = "cmac(camellia)",
+ .hash_name = NULL,
+ .derivation_enc = "cts(cbc(camellia))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 32,
+ .Ke_len = 32,
+ .Ki_len = 32,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 16,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc6803_crypto_profile,
+};
diff --git a/crypto/krb5/rfc8009_aes2.c b/crypto/krb5/rfc8009_aes2.c
new file mode 100644
index 000000000000..d39851fc3a4e
--- /dev/null
+++ b/crypto/krb5/rfc8009_aes2.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rfc8009 AES Encryption with HMAC-SHA2 for Kerberos 5
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <crypto/authenc.h>
+#include "internal.h"
+
+static const struct krb5_buffer rfc8009_no_context = { .len = 0, .data = "" };
+
+/*
+ * Calculate the key derivation function KDF-HMAC-SHA2(key, label, [context,] k)
+ *
+ * KDF-HMAC-SHA2(key, label, [context,] k) = k-truncate(K1)
+ *
+ * Using the appropriate one of:
+ * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | k)
+ * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | k)
+ * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | context | k)
+ * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | context | k)
+ * [rfc8009 sec 3]
+ */
+static int rfc8009_calc_KDF_HMAC_SHA2(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *label,
+ const struct krb5_buffer *context,
+ unsigned int k,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_shash *shash;
+ struct krb5_buffer K1, data;
+ struct shash_desc *desc;
+ __be32 tmp;
+ size_t bsize;
+ void *buffer;
+ u8 *p;
+ int ret = -ENOMEM;
+
+ if (WARN_ON(result->len != k / 8))
+ return -EINVAL;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+ ret = crypto_shash_setkey(shash, key->data, key->len);
+ if (ret < 0)
+ goto error_shash;
+
+ ret = -EINVAL;
+ if (WARN_ON(crypto_shash_digestsize(shash) * 8 < k))
+ goto error_shash;
+
+ ret = -ENOMEM;
+ data.len = 4 + label->len + 1 + context->len + 4;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(data.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto error_shash;
+
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ p = data.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ *(__be32 *)p = htonl(0x00000001);
+ p += 4;
+ memcpy(p, label->data, label->len);
+ p += label->len;
+ *p++ = 0;
+ memcpy(p, context->data, context->len);
+ p += context->len;
+ tmp = htonl(k);
+ memcpy(p, &tmp, 4);
+ p += 4;
+
+ ret = -EINVAL;
+ if (WARN_ON(p - (u8 *)data.data != data.len))
+ goto error;
+
+ K1.len = crypto_shash_digestsize(shash);
+ K1.data = buffer +
+ krb5_shash_size(shash);
+
+ ret = crypto_shash_finup(desc, data.data, data.len, K1.data);
+ if (ret < 0)
+ goto error;
+
+ memcpy(result->data, K1.data, result->len);
+
+error:
+ kfree_sensitive(buffer);
+error_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
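+
+/*
+ * Worked example (illustrative only): when deriving Ke for
+ * aes256-cts-hmac-sha384-192, the label is the usage constant ending in
+ * 0xAA, the context is empty and k = 256, so the single HMAC input is
+ *
+ * 0x00000001 | usage | 0xAA | 0x00 | 0x00000100
+ *
+ * and K1 = HMAC-SHA-384(key, input) is truncated to 32 bytes.
+ */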
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 256)
+ * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 384)
+ *
+ * The "prfconstant" used in the PRF operation is the three-octet string
+ * "prf".
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *input_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, input_key, &prfconstant,
+ octet_string, krb5->prf_len * 8,
+ result, gfp);
+}
+
+/*
+ * Derive Ke.
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128)
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256)
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_Ke(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant,
+ &rfc8009_no_context, krb5->key_bytes * 8,
+ result, gfp);
+}
+
+/*
+ * Derive Kc/Ki
+ * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128)
+ * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128)
+ * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192)
+ * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192)
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_Ki(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant,
+ &rfc8009_no_context, krb5->cksum_len * 8,
+ result, gfp);
+}
+
+/*
+ * Apply encryption and checksumming functions to a message. Unlike for
+ * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first.
+ */
+static ssize_t rfc8009_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ struct aead_request *req;
+ struct scatterlist bsg[2];
+ ssize_t ret, done;
+ size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset;
+ void *buffer;
+ u8 *iv, *ad;
+
+ if (WARN_ON(data_offset != krb5->conf_len))
+ return -EINVAL; /* Data is in wrong place */
+
+ secure_offset = 0;
+ base_len = krb5->conf_len + data_len;
+ pad_len = 0;
+ secure_len = base_len + pad_len;
+ cksum_offset = secure_len;
+ if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len))
+ return -EFAULT;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+ ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead);
+
+ /* Generate a confounder and insert it at the front of the message,
+ * using the as-yet-uninitialised request buffer as scratch space. */
+ ret = -EFAULT;
+ if (!preconfounded) {
+ get_random_bytes(buffer, krb5->conf_len);
+ done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len,
+ secure_offset);
+ if (done != krb5->conf_len)
+ goto error;
+ }
+
+ /* We may need to pad out to the crypto blocksize. */
+ if (pad_len) {
+ done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len);
+ if (done != pad_len)
+ goto error;
+ }
+
+ /* We need to include the starting IV in the hash. */
+ sg_init_table(bsg, 2);
+ sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead));
+ sg_chain(bsg, 2, sg);
+
+ /* Hash and encrypt the message. */
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, krb5_aead_ivsize(aead));
+ aead_request_set_crypt(req, bsg, bsg, secure_len, iv);
+ ret = crypto_aead_encrypt(req);
+ if (ret < 0)
+ goto error;
+
+ ret = secure_len + krb5->cksum_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
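+
+/*
+ * Illustrative view of what the AEAD sees above:
+ *
+ * AD: zeroed IV (hashed, but not transmitted)
+ * crypt: confounder | plaintext [| pad] | checksum
+ *
+ * which is how the rfc8009 requirement to hash the starting IV is met.
+ */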
+
+/*
+ * Apply decryption and checksumming functions to a message. Unlike for
+ * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first.
+ *
+ * The offset and length are updated to reflect the actual content of the
+ * encrypted region.
+ */
+static int rfc8009_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct aead_request *req;
+ struct scatterlist bsg[2];
+ size_t bsize;
+ void *buffer;
+ int ret;
+ u8 *iv, *ad;
+
+ if (WARN_ON(*_offset != 0))
+ return -EINVAL; /* Can't set offset on aead */
+
+ if (*_len < krb5->conf_len + krb5->cksum_len)
+ return -EPROTO;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+ ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead);
+
+ /* We need to include the starting IV in the hash. */
+ sg_init_table(bsg, 2);
+ sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead));
+ sg_chain(bsg, 2, sg);
+
+ /* Decrypt the message and verify its checksum. */
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, krb5_aead_ivsize(aead));
+ aead_request_set_crypt(req, bsg, bsg, *_len, iv);
+ ret = crypto_aead_decrypt(req);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the boundaries of the data. */
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+static const struct krb5_crypto_profile rfc8009_crypto_profile = {
+ .calc_PRF = rfc8009_calc_PRF,
+ .calc_Kc = rfc8009_calc_Ki, /* Kc is derived with the same length as Ki */
+ .calc_Ke = rfc8009_calc_Ke,
+ .calc_Ki = rfc8009_calc_Ki,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = rfc8009_encrypt,
+ .decrypt = rfc8009_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
+
+const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128 = {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128,
+ .name = "aes128-cts-hmac-sha256-128",
+ .encrypt_name = "authenc(hmac(sha256),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha256)",
+ .hash_name = "sha256",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 32,
+ .prf_len = 32,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc8009_crypto_profile,
+};
+
+const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192 = {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256,
+ .name = "aes256-cts-hmac-sha384-192",
+ .encrypt_name = "authenc(hmac(sha384),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha384)",
+ .hash_name = "sha384",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 24,
+ .Ke_len = 32,
+ .Ki_len = 24,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 24,
+ .hash_len = 48,
+ .prf_len = 48,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc8009_crypto_profile,
+};
diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c
new file mode 100644
index 000000000000..4519c572d37e
--- /dev/null
+++ b/crypto/krb5/selftest.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos library self-testing
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+#define VALID(X) \
+ ({ \
+ bool __x = (X); \
+ if (__x) { \
+ pr_warn("!!! TESTINVAL %s:%u\n", __FILE__, __LINE__); \
+ ret = -EBADMSG; \
+ } \
+ __x; \
+ })
+
+#define CHECK(X) \
+ ({ \
+ bool __x = (X); \
+ if (__x) { \
+ pr_warn("!!! TESTFAIL %s:%u\n", __FILE__, __LINE__); \
+ ret = -EBADMSG; \
+ } \
+ __x; \
+ })
+
+enum which_key {
+ TEST_KC, TEST_KE, TEST_KI,
+};
+
+#if 0
+static void dump_sg(struct scatterlist *sg, unsigned int limit)
+{
+ unsigned int index = 0, n = 0;
+
+ for (; sg && limit > 0; sg = sg_next(sg)) {
+ unsigned int off = sg->offset, len = umin(sg->length, limit);
+ const void *p = kmap_local_page(sg_page(sg));
+
+ limit -= len;
+ while (len > 0) {
+ unsigned int part = umin(len, 32);
+
+ pr_notice("[%x] %04x: %*phN\n", n, index, part, p + off);
+ index += part;
+ off += part;
+ len -= part;
+ }
+
+ kunmap_local(p);
+ n++;
+ }
+}
+#endif
+
+static int prep_buf(struct krb5_buffer *buf)
+{
+ buf->data = kmalloc(buf->len, GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+ return 0;
+}
+
+#define PREP_BUF(BUF, LEN) \
+ do { \
+ (BUF)->len = (LEN); \
+ ret = prep_buf((BUF)); \
+ if (ret < 0) \
+ goto out; \
+ } while (0)
+
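+/*
+ * Load test data into a buffer.  A leading apostrophe marks the rest of the
+ * string as literal data; otherwise the string is parsed as hex.
+ */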
+static int load_buf(struct krb5_buffer *buf, const char *from)
+{
+ size_t len = strlen(from);
+ int ret;
+
+ if (len > 1 && from[0] == '\'') {
+ PREP_BUF(buf, len - 1);
+ memcpy(buf->data, from + 1, len - 1);
+ ret = 0;
+ goto out;
+ }
+
+ if (VALID(len & 1))
+ return -EINVAL;
+
+ PREP_BUF(buf, len / 2);
+ ret = hex2bin(buf->data, from, buf->len);
+ if (ret < 0) {
+ VALID(1);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+#define LOAD_BUF(BUF, FROM) do { ret = load_buf(BUF, FROM); if (ret < 0) goto out; } while (0)
+
+static void clear_buf(struct krb5_buffer *buf)
+{
+ kfree(buf->data);
+ buf->len = 0;
+ buf->data = NULL;
+}
+
+/*
+ * Perform a pseudo-random function check.
+ */
+static int krb5_test_one_prf(const struct krb5_prf_test *test)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct krb5_buffer key = {}, octet = {}, result = {}, prf = {};
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ LOAD_BUF(&key, test->key);
+ LOAD_BUF(&octet, test->octet);
+ LOAD_BUF(&prf, test->prf);
+ PREP_BUF(&result, krb5->prf_len);
+
+ if (VALID(result.len != prf.len)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = krb5->profile->calc_PRF(krb5, &key, &octet, &result, GFP_KERNEL);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("PRF calculation failed %d\n", ret);
+ goto out;
+ }
+
+ if (memcmp(result.data, prf.data, result.len) != 0) {
+ CHECK(1);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&result);
+ clear_buf(&prf);
+ clear_buf(&octet);
+ clear_buf(&key);
+ return ret;
+}
+
+/*
+ * Perform a key derivation check.
+ */
+static int krb5_test_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_key_test_one *test,
+ enum which_key which)
+{
+ struct krb5_buffer key = {}, result = {};
+ int ret;
+
+ LOAD_BUF(&key, test->key);
+ PREP_BUF(&result, key.len);
+
+ switch (which) {
+ case TEST_KC:
+ ret = krb5_derive_Kc(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ case TEST_KE:
+ ret = krb5_derive_Ke(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ case TEST_KI:
+ ret = krb5_derive_Ki(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ default:
+ VALID(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Key derivation failed %d\n", ret);
+ goto out;
+ }
+
+ if (memcmp(result.data, key.data, result.len) != 0) {
+ CHECK(1);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+out:
+ clear_buf(&key);
+ clear_buf(&result);
+ return ret;
+}
+
+static int krb5_test_one_key(const struct krb5_key_test *test)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct krb5_buffer base_key = {};
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ LOAD_BUF(&base_key, test->key);
+
+ ret = krb5_test_key(krb5, &base_key, &test->Kc, TEST_KC);
+ if (ret < 0)
+ goto out;
+ ret = krb5_test_key(krb5, &base_key, &test->Ke, TEST_KE);
+ if (ret < 0)
+ goto out;
+ ret = krb5_test_key(krb5, &base_key, &test->Ki, TEST_KI);
+ if (ret < 0)
+ goto out;
+
+out:
+ clear_buf(&base_key);
+ return ret;
+}
+
+/*
+ * Perform an encryption test.
+ */
+static int krb5_test_one_enc(const struct krb5_enc_test *test, void *buf)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct crypto_aead *ci = NULL;
+ struct krb5_buffer K0 = {}, Ke = {}, Ki = {}, keys = {};
+ struct krb5_buffer conf = {}, plain = {}, ct = {};
+ struct scatterlist sg[1];
+ size_t data_len, data_offset, message_len;
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ /* Load the test data into binary buffers. */
+ LOAD_BUF(&conf, test->conf);
+ LOAD_BUF(&plain, test->plain);
+ LOAD_BUF(&ct, test->ct);
+
+ if (test->K0) {
+ LOAD_BUF(&K0, test->K0);
+ } else {
+ LOAD_BUF(&Ke, test->Ke);
+ LOAD_BUF(&Ki, test->Ki);
+
+ ret = krb5->profile->load_encrypt_keys(krb5, &Ke, &Ki, &keys, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (VALID(conf.len != krb5->conf_len) ||
+ VALID(ct.len != krb5->conf_len + plain.len + krb5->cksum_len))
+ goto out;
+
+ data_len = plain.len;
+ message_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
+ data_len, &data_offset);
+
+ if (CHECK(message_len != ct.len)) {
+ pr_warn("Encrypted length mismatch %zu != %u\n", message_len, ct.len);
+ goto out;
+ }
+ if (CHECK(data_offset != conf.len)) {
+ pr_warn("Data offset mismatch %zu != %u\n", data_offset, conf.len);
+ goto out;
+ }
+
+ memcpy(buf, conf.data, conf.len);
+ memcpy(buf + data_offset, plain.data, plain.len);
+
+ /* Allocate a crypto object and set its key. */
+ if (test->K0)
+ ci = crypto_krb5_prepare_encryption(krb5, &K0, test->usage, GFP_KERNEL);
+ else
+ ci = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL);
+
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ ci = NULL;
+ pr_err("Couldn't alloc AEAD %s: %d\n", krb5->encrypt_name, ret);
+ goto out;
+ }
+
+ /* Encrypt the message. */
+ sg_init_one(sg, buf, message_len);
+ ret = crypto_krb5_encrypt(krb5, ci, sg, 1, message_len,
+ data_offset, data_len, true);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Encryption failed %d\n", ret);
+ goto out;
+ }
+ if (ret != message_len) {
+ CHECK(1);
+ pr_warn("Encrypted message wrong size %x != %zx\n", ret, message_len);
+ goto out;
+ }
+
+ if (memcmp(buf, ct.data, ct.len) != 0) {
+ CHECK(1);
+ pr_warn("Ciphertext mismatch\n");
+ pr_warn("BUF %*phN\n", ct.len, buf);
+ pr_warn("CT %*phN\n", ct.len, ct.data);
+ pr_warn("PT %*phN%*phN\n", conf.len, conf.data, plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ /* Decrypt the encrypted message. */
+ data_offset = 0;
+ data_len = message_len;
+ ret = crypto_krb5_decrypt(krb5, ci, sg, 1, &data_offset, &data_len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Decryption failed %d\n", ret);
+ goto out;
+ }
+
+ if (CHECK(data_offset != conf.len) ||
+ CHECK(data_len != plain.len))
+ goto out;
+
+ if (memcmp(buf, conf.data, conf.len) != 0) {
+ CHECK(1);
+ pr_warn("Confounder mismatch\n");
+ pr_warn("ENC %*phN\n", conf.len, buf);
+ pr_warn("DEC %*phN\n", conf.len, conf.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ if (memcmp(buf + conf.len, plain.data, plain.len) != 0) {
+ CHECK(1);
+ pr_warn("Plaintext mismatch\n");
+ pr_warn("BUF %*phN\n", plain.len, buf + conf.len);
+ pr_warn("PT %*phN\n", plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&ct);
+ clear_buf(&plain);
+ clear_buf(&conf);
+ clear_buf(&keys);
+ clear_buf(&Ki);
+ clear_buf(&Ke);
+ clear_buf(&K0);
+ if (ci)
+ crypto_free_aead(ci);
+ return ret;
+}
+
+/*
+ * Perform a checksum test.
+ */
+static int krb5_test_one_mic(const struct krb5_mic_test *test, void *buf)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct crypto_shash *ci = NULL;
+ struct scatterlist sg[1];
+ struct krb5_buffer K0 = {}, Kc = {}, keys = {}, plain = {}, mic = {};
+ size_t offset, len, message_len;
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ /* Allocate a crypto object and set its key. */
+ if (test->K0) {
+ LOAD_BUF(&K0, test->K0);
+ ci = crypto_krb5_prepare_checksum(krb5, &K0, test->usage, GFP_KERNEL);
+ } else {
+ LOAD_BUF(&Kc, test->Kc);
+
+ ret = krb5->profile->load_checksum_key(krb5, &Kc, &keys, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+
+ ci = krb5_prepare_checksum(krb5, &keys, GFP_KERNEL);
+ }
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ ci = NULL;
+ pr_err("Couldn't alloc shash %s: %d\n", krb5->cksum_name, ret);
+ goto out;
+ }
+
+ /* Load the test data into binary buffers. */
+ LOAD_BUF(&plain, test->plain);
+ LOAD_BUF(&mic, test->mic);
+
+ len = plain.len;
+ message_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE,
+ len, &offset);
+
+ if (CHECK(message_len != mic.len + plain.len)) {
+ pr_warn("MIC length mismatch %zu != %u\n",
+ message_len, mic.len + plain.len);
+ goto out;
+ }
+
+ memcpy(buf + offset, plain.data, plain.len);
+
+ /* Generate a MIC generation request. */
+ sg_init_one(sg, buf, 1024);
+
+ ret = crypto_krb5_get_mic(krb5, ci, NULL, sg, 1, 1024,
+ krb5->cksum_len, plain.len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Get MIC failed %d\n", ret);
+ goto out;
+ }
+ len = ret;
+
+ if (CHECK(len != plain.len + mic.len)) {
+ pr_warn("MIC length mismatch %zu != %u\n", len, plain.len + mic.len);
+ goto out;
+ }
+
+ if (memcmp(buf, mic.data, mic.len) != 0) {
+ CHECK(1);
+ pr_warn("MIC mismatch\n");
+ pr_warn("BUF %*phN\n", mic.len, buf);
+ pr_warn("MIC %*phN\n", mic.len, mic.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ /* Generate a verification request. */
+ offset = 0;
+ ret = crypto_krb5_verify_mic(krb5, ci, NULL, sg, 1, &offset, &len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Verify MIC failed %d\n", ret);
+ goto out;
+ }
+
+ if (CHECK(offset != mic.len) ||
+ CHECK(len != plain.len))
+ goto out;
+
+ if (memcmp(buf + offset, plain.data, plain.len) != 0) {
+ CHECK(1);
+ pr_warn("Plaintext mismatch\n");
+ pr_warn("BUF %*phN\n", plain.len, buf + offset);
+ pr_warn("PT %*phN\n", plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&mic);
+ clear_buf(&plain);
+ clear_buf(&keys);
+ clear_buf(&K0);
+ clear_buf(&Kc);
+ if (ci)
+ crypto_free_shash(ci);
+ return ret;
+}
+
+int krb5_selftest(void)
+{
+ void *buf;
+ int ret = 0, i;
+
+ buf = kmalloc(4096, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pr_notice("\n");
+ pr_notice("Running selftests\n");
+
+ for (i = 0; krb5_prf_tests[i].name; i++) {
+ ret = krb5_test_one_prf(&krb5_prf_tests[i]);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_prf_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_key_tests[i].name; i++) {
+ ret = krb5_test_one_key(&krb5_key_tests[i]);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_key_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_enc_tests[i].name; i++) {
+ memset(buf, 0x5a, 4096);
+ ret = krb5_test_one_enc(&krb5_enc_tests[i], buf);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_enc_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_mic_tests[i].name; i++) {
+ memset(buf, 0x5a, 4096);
+ ret = krb5_test_one_mic(&krb5_mic_tests[i], buf);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_mic_tests[i].name);
+ }
+ }
+
+ ret = 0;
+out:
+ pr_notice("Selftests %s\n", ret == 0 ? "succeeded" : "failed");
+ kfree(buf);
+ return ret;
+}
diff --git a/crypto/krb5/selftest_data.c b/crypto/krb5/selftest_data.c
new file mode 100644
index 000000000000..24447ee8bf07
--- /dev/null
+++ b/crypto/krb5/selftest_data.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Data for Kerberos library self-testing
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "internal.h"
+
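+/*
+ * Note on format (see load_buf() in selftest.c): string fields below are hex
+ * unless prefixed with an apostrophe, in which case the remainder is taken
+ * as literal text.
+ */
+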
+/*
+ * Pseudo-random function tests.
+ */
+const struct krb5_prf_test krb5_prf_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "prf",
+ .key = "3705D96080C17728A0E800EAB6E0D23C",
+ .octet = "74657374",
+ .prf = "9D188616F63852FE86915BB840B4A886FF3E6BB0F819B49B893393D393854295",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "prf",
+ .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52",
+ .octet = "74657374",
+ .prf =
+ "9801F69A368C2BF675E59521E177D9A07F67EFE1CFDE8D3C8D6F6A0256E3B17D"
+ "B3C1B62AD1B8553360D17367EB1514D2",
+ },
+ {/* END */}
+};
+
+/*
+ * Key derivation tests.
+ */
+const struct krb5_key_test krb5_key_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "key",
+ .key = "3705D96080C17728A0E800EAB6E0D23C",
+ .Kc.use = 0x00000002,
+ .Kc.key = "B31A018A48F54776F403E9A396325DC3",
+ .Ke.use = 0x00000002,
+ .Ke.key = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki.use = 0x00000002,
+ .Ki.key = "9FDA0E56AB2D85E1569A688696C26A6C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "key",
+ .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52",
+ .Kc.use = 0x00000002,
+ .Kc.key = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D",
+ .Ke.use = 0x00000002,
+ .Ke.key = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki.use = 0x00000002,
+ .Ki.key = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "key",
+ .key = "57D0297298FFD9D35DE5A47FB4BDE24B",
+ .Kc.use = 0x00000002,
+ .Kc.key = "D155775A209D05F02B38D42A389E5A56",
+ .Ke.use = 0x00000002,
+ .Ke.key = "64DF83F85A532F17577D8C37035796AB",
+ .Ki.use = 0x00000002,
+ .Ki.key = "3E4FBDF30FB8259C425CB6C96F1F4635",
+ },
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "key",
+ .key = "B9D6828B2056B7BE656D88A123B1FAC68214AC2B727ECF5F69AFE0C4DF2A6D2C",
+ .Kc.use = 0x00000002,
+ .Kc.key = "E467F9A9552BC7D3155A6220AF9C19220EEED4FF78B0D1E6A1544991461A9E50",
+ .Ke.use = 0x00000002,
+ .Ke.key = "412AEFC362A7285FC3966C6A5181E7605AE675235B6D549FBFC9AB6630A4C604",
+ .Ki.use = 0x00000002,
+ .Ki.key = "FA624FA0E523993FA388AEFDC67E67EBCD8C08E8A0246B1D73B0D1DD9FC582B0",
+ },
+ {/* END */}
+};
+
+/*
+ * Encryption tests.
+ */
+const struct krb5_enc_test krb5_enc_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "7E5895EAF2672435BAD817F545A37148",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "EF85FB890BB8472F4DAB20394DCA781DAD877EDA39D50C870C0D5A0A8E48C718",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain<block",
+ .plain = "000102030405",
+ .conf = "7BCA285E2FD4130FB55B1A5C83BC5B24",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7E6877CE99E247E52D16ED4421DFDF8976C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain==block",
+ .plain = "000102030405060708090A0B0C0D0E0F",
+ .conf = "56AB21713FF62C0A1457200F6FA9948F",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "3517D640F50DDC8AD3628722B3569D2AE07493FA8263254080EA65C1008E8FC295FB4852E7D83E1E7C48C37EEBE6B0D3",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain>block",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .conf = "A7A4E29A4728CE10664FB64E49AD3FAC",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "720F73B18D9859CD6CCB4346115CD336C70F58EDC0C4437C5573544C31C813BCE1E6D072C186B39A413C2F92CA9B8334A287FFCBFC",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "F764E9FA15C276478B2C7D0C4E5F58E4",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "41F53FA5BFE7026D91FAF9BE959195A058707273A96A40F0A01960621AC612748B9BBFBE7EB4CE3C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain<block",
+ .plain = "000102030405",
+ .conf = "B80D3251C1F6471494256FFE712D0B9A",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "4ED7B37C2BCAC8F74F23C1CF07E62BC7B75FB3F637B9F559C7F664F69EAB7B6092237526EA0D1F61CB20D69D10F2",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain==block",
+ .plain = "000102030405060708090A0B0C0D0E0F",
+ .conf = "53BF8A0D105265D4E276428624CE5E63",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "BC47FFEC7998EB91E8115CF8D19DAC4BBBE2E163E87DD37F49BECA92027764F68CF51F14D798C2273F35DF574D1F932E40C4FF255B36A266",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain>block",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .conf = "763E65367E864F02F55153C7E3B58AF1",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "40013E2DF58E8751957D2878BCD2D6FE101CCFD556CB1EAE79DB3C3EE86429F2B2A602AC86FEF6ECB647D6295FAE077A1FEB517508D2C16B4192E01F62",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "B69822A19A6B09C0EBC8557D1F1B6C0A",
+ .K0 = "1DC46A8D763F4F93742BCBA3387576C3",
+ .usage = 0,
+ .ct = "C466F1871069921EDB7C6FDE244A52DB0BA10EDC197BDB8006658CA3CCCE6EB8",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 1 plain",
+ .plain = "'1",
+ .conf = "6F2FC3C2A166FD8898967A83DE9596D9",
+ .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C",
+ .usage = 1,
+ .ct = "842D21FD950311C0DD464A3F4BE8D6DA88A56D559C9B47D3F9A85067AF661559B8",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 9 plain",
+ .plain = "'9 bytesss",
+ .conf = "A5B4A71E077AEEF93C8763C18FDB1F10",
+ .K0 = "A1BB61E805F9BA6DDE8FDBDDC05CDEA0",
+ .usage = 2,
+ .ct = "619FF072E36286FF0A28DEB3A352EC0D0EDF5C5160D663C901758CCF9D1ED33D71DB8F23AABF8348A0",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 13 plain",
+ .plain = "'13 bytes byte",
+ .conf = "19FEE40D810C524B5B22F01874C693DA",
+ .K0 = "2CA27A5FAF5532244506434E1CEF6676",
+ .usage = 3,
+ .ct = "B8ECA3167AE6315512E59F98A7C500205E5F63FF3BB389AF1C41A21D640D8615C9ED3FBEB05AB6ACB67689B5EA",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 30 plain",
+ .plain = "'30 bytes bytes bytes bytes byt",
+ .conf = "CA7A7AB4BE192DABD603506DB19C39E2",
+ .K0 = "7824F8C16F83FF354C6BF7515B973F43",
+ .usage = 4,
+ .ct = "A26A3905A4FFD5816B7B1E27380D08090C8EC1F304496E1ABDCD2BDCD1DFFC660989E117A713DDBB57A4146C1587CBA4356665591D2240282F5842B105A5",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "3CBBD2B45917941067F96599BB98926C",
+ .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B",
+ .usage = 0,
+ .ct = "03886D03310B47A6D8F06D7B94D1DD837ECCE315EF652AFF620859D94A259266",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 1 plain",
+ .plain = "'1",
+ .conf = "DEF487FCEBE6DE6346D4DA4521BBA2D2",
+ .K0 = "1B97FE0A190E2021EB30753E1B6E1E77B0754B1D684610355864104963463833",
+ .usage = 1,
+ .ct = "2C9C1570133C99BF6A34BC1B0212002FD194338749DB4135497A347CFCD9D18A12",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 9 plain",
+ .plain = "'9 bytesss",
+ .conf = "AD4FF904D34E555384B14100FC465F88",
+ .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05",
+ .usage = 2,
+ .ct = "9C6DE75F812DE7ED0D28B2963557A115640998275B0AF5152709913FF52A2A9C8E63B872F92E64C839",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 13 plain",
+ .plain = "'13 bytes byte",
+ .conf = "CF9BCA6DF1144E0C0AF9B8F34C90D514",
+ .K0 = "B038B132CD8E06612267FAB7170066D88AECCBA0B744BFC60DC89BCA182D0715",
+ .usage = 3,
+ .ct = "EEEC85A9813CDC536772AB9B42DEFC5706F726E975DDE05A87EB5406EA324CA185C9986B42AABE794B84821BEE",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 30 plain",
+ .plain = "'30 bytes bytes bytes bytes byt",
+ .conf = "644DEF38DA35007275878D216855E228",
+ .K0 = "CCFCD349BF4C6677E86E4B02B8EAB924A546AC731CF9BF6989B996E7D6BFBBA7",
+ .usage = 4,
+ .ct = "0E44680985855F2D1F1812529CA83BFD8E349DE6FD9ADA0BAAA048D68E265FEBF34AD1255A344999AD37146887A6C6845731AC7F46376A0504CD06571474",
+ },
+ {/* END */}
+};
+
+/*
+ * Checksum generation tests.
+ */
+const struct krb5_mic_test krb5_mic_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "mic",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .Kc = "B31A018A48F54776F403E9A396325DC3",
+ .mic = "D78367186643D67B411CBA9139FC1DEE",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "mic",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .Kc = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D",
+ .mic = "45EE791567EEFCA37F4AC1E0222DE80D43C3BFA06699672A",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "mic abc",
+ .plain = "'abcdefghijk",
+ .K0 = "1DC46A8D763F4F93742BCBA3387576C3",
+ .usage = 7,
+ .mic = "1178E6C5C47A8C1AE0C4B9C7D4EB7B6B",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "mic ABC",
+ .plain = "'ABCDEFGHIJKLMNOPQRSTUVWXYZ",
+ .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C",
+ .usage = 8,
+ .mic = "D1B34F7004A731F23A0C00BF6C3F753A",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "mic 123",
+ .plain = "'123456789",
+ .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B",
+ .usage = 9,
+ .mic = "87A12CFD2B96214810F01C826E7744B1",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "mic !@#",
+ .plain = "'!@#$%^&*()!@#$%^&*()!@#$%^&*()",
+ .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05",
+ .usage = 10,
+ .mic = "3FA0B42355E52B189187294AA252AB64",
+ },
+ {/* END */}
+};
diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c
new file mode 100644
index 000000000000..a1de55994d92
--- /dev/null
+++ b/crypto/krb5enc.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AEAD wrapper for Kerberos 5 RFC3961 simplified profile.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Derived from authenc:
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct krb5enc_instance_ctx {
+ struct crypto_ahash_spawn auth;
+ struct crypto_skcipher_spawn enc;
+ unsigned int reqoff;
+};
+
+struct krb5enc_ctx {
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+};
+
+struct krb5enc_request_ctx {
+ struct scatterlist src[2];
+ struct scatterlist dst[2];
+ char tail[];
+};
+
+static void krb5enc_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+/**
+ * crypto_krb5enc_extractkeys - Extract Ke and Ki keys from the key blob.
+ * @keys: Where to put the key sizes and pointers
+ * @key: Encoded key material
+ * @keylen: Amount of key material
+ *
+ * Decode the key blob we're given. It starts with an rtattr that indicates
+ * the format and the length. Format CRYPTO_AUTHENC_KEYA_PARAM is:
+ *
+ * rtattr || __be32 enckeylen || authkey || enckey
+ *
+ * Note that the rtattr is in cpu-endian form, unlike enckeylen. This must be
+ * handled correctly in static testmgr data.
+ */
+int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen)
+{
+ struct rtattr *rta = (struct rtattr *)key;
+ struct crypto_authenc_key_param *param;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
+ return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+
+ param = RTA_DATA(rta);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
+
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
+
+ keys->authkeylen = keylen - keys->enckeylen;
+ keys->authkey = key;
+ keys->enckey = key + keys->authkeylen;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_krb5enc_extractkeys);
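+
+/*
+ * Purely illustrative sketch of building a key blob in the format decoded
+ * above; blob, Ki, Ke and their lengths are hypothetical, with Ki as the
+ * authentication key and Ke as the encryption key:
+ *
+ * struct crypto_authenc_key_param *param;
+ * struct rtattr *rta = (void *)blob;
+ *
+ * rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ * rta->rta_len = RTA_LENGTH(sizeof(*param));
+ * param = RTA_DATA(rta);
+ * param->enckeylen = cpu_to_be32(Ke_len);
+ * memcpy(blob + rta->rta_len, Ki, Ki_len);
+ * memcpy(blob + rta->rta_len + Ki_len, Ke, Ke_len);
+ */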
+
+static int krb5enc_setkey(struct crypto_aead *krb5enc, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct crypto_skcipher *enc = ctx->enc;
+ struct crypto_ahash *auth = ctx->auth;
+ unsigned int flags = crypto_aead_get_flags(krb5enc);
+ int err = -EINVAL;
+
+ if (crypto_krb5enc_extractkeys(&keys, key, keylen) != 0)
+ goto out;
+
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
+ if (err)
+ goto out;
+
+ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(enc, flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
+static void krb5enc_encrypt_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ krb5enc_request_complete(req, err);
+}
+
+/*
+ * Start the encryption of the plaintext. We skip over the associated data as
+ * that only gets included in the hash.
+ */
+static int krb5enc_dispatch_encrypt(struct aead_request *req,
+ unsigned int flags)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_skcipher *enc = ctx->enc;
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+ if (req->src == req->dst)
+ dst = src;
+ else
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+
+ skcipher_request_set_tfm(skreq, enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ krb5enc_encrypt_done, req);
+ skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
+/*
+ * Insert the hash into the checksum field in the destination buffer directly
+ * after the encrypted region.
+ */
+static void krb5enc_insert_checksum(struct aead_request *req, u8 *hash)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+
+ scatterwalk_map_and_copy(hash, req->dst,
+ req->assoclen + req->cryptlen,
+ crypto_aead_authsize(krb5enc), 1);
+}
+
+/*
+ * Upon completion of an asynchronous digest, transfer the hash to the checksum
+ * field.
+ */
+static void krb5enc_encrypt_ahash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+
+ if (err)
+ return krb5enc_request_complete(req, err);
+
+ krb5enc_insert_checksum(req, ahreq->result);
+
+ err = krb5enc_dispatch_encrypt(req, 0);
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+/*
+ * Start the digest of the plaintext for encryption. In theory, this could be
+ * run in parallel with the encryption, provided the src and dst buffers don't
+ * overlap.
+ */
+static int krb5enc_dispatch_encrypt_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct crypto_ahash *auth = ctx->auth;
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ krb5enc_encrypt_ahash_done, req);
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen);
+
+ err = crypto_ahash_digest(ahreq);
+ if (err)
+ return err;
+
+ krb5enc_insert_checksum(req, hash);
+ return 0;
+}
+
+/*
+ * Process an encryption operation. We can perform the cipher and the hash in
+ * parallel, provided the src and dst buffers are separate.
+ */
+static int krb5enc_encrypt(struct aead_request *req)
+{
+ int err;
+
+ err = krb5enc_dispatch_encrypt_hash(req);
+ if (err < 0)
+ return err;
+
+ return krb5enc_dispatch_encrypt(req, aead_request_flags(req));
+}
+
+static int krb5enc_verify_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ u8 *calc_hash = areq_ctx->tail;
+ u8 *msg_hash = areq_ctx->tail + authsize;
+
+ scatterwalk_map_and_copy(msg_hash, req->src, ahreq->nbytes, authsize, 0);
+
+ if (crypto_memneq(msg_hash, calc_hash, authsize))
+ return -EBADMSG;
+ return 0;
+}
+
+static void krb5enc_decrypt_hash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (err)
+ return krb5enc_request_complete(req, err);
+
+ err = krb5enc_verify_hash(req);
+ krb5enc_request_complete(req, err);
+}
+
+/*
+ * Dispatch the hashing of the plaintext after we've done the decryption.
+ */
+static int krb5enc_dispatch_decrypt_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ struct crypto_ahash *auth = ctx->auth;
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, req->dst, hash,
+ req->assoclen + req->cryptlen - authsize);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ krb5enc_decrypt_hash_done, req);
+
+ err = crypto_ahash_digest(ahreq);
+ if (err < 0)
+ return err;
+
+ return krb5enc_verify_hash(req);
+}
+
+/*
+ * Dispatch the decryption of the ciphertext.
+ */
+static int krb5enc_dispatch_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+ dst = src;
+
+ if (req->src != req->dst)
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+
+ skcipher_request_set_tfm(skreq, ctx->enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(skreq, src, dst,
+ req->cryptlen - authsize, req->iv);
+
+ return crypto_skcipher_decrypt(skreq);
+}
+
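+/*
+ * Unlike authenc, which authenticates the ciphertext, krb5enc checksums the
+ * plaintext, so the message must be decrypted before its hash can be
+ * verified.
+ */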
+static int krb5enc_decrypt(struct aead_request *req)
+{
+ int err;
+
+ err = krb5enc_dispatch_decrypt(req);
+ if (err < 0)
+ return err;
+
+ return krb5enc_dispatch_decrypt_hash(req);
+}
+
+static int krb5enc_init_tfm(struct crypto_aead *tfm)
+{
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ enc = crypto_spawn_skcipher(&ictx->enc);
+ err = PTR_ERR(enc);
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+ ctx->auth = auth;
+ ctx->enc = enc;
+
+ crypto_aead_set_reqsize(
+ tfm,
+ sizeof(struct krb5enc_request_ctx) +
+ ictx->reqoff + /* Space for two checksums */
+ umax(sizeof(struct ahash_request) + crypto_ahash_reqsize(auth),
+ sizeof(struct skcipher_request) + crypto_skcipher_reqsize(enc)));
+
+ return 0;
+
+err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+}
+
+static void krb5enc_exit_tfm(struct crypto_aead *tfm)
+{
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_skcipher(ctx->enc);
+}
+
+static void krb5enc_free(struct aead_instance *inst)
+{
+ struct krb5enc_instance_ctx *ctx = aead_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->enc);
+ crypto_drop_ahash(&ctx->auth);
+ kfree(inst);
+}
+
+/*
+ * Create an instance of a template for a specific hash and cipher pair.
+ */
+static int krb5enc_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct krb5enc_instance_ctx *ictx;
+ struct skcipher_alg_common *enc;
+ struct hash_alg_common *auth;
+ struct aead_instance *inst;
+ struct crypto_alg *auth_base;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err) {
+ pr_err("attr_type failed\n");
+ return err;
+ }
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ ictx = aead_instance_ctx(inst);
+
+ err = crypto_grab_ahash(&ictx->auth, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err) {
+ pr_err("grab ahash failed\n");
+ goto err_free_inst;
+ }
+ auth = crypto_spawn_ahash_alg(&ictx->auth);
+ auth_base = &auth->base;
+
+ err = crypto_grab_skcipher(&ictx->enc, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
+ if (err) {
+ pr_err("grab skcipher failed\n");
+ goto err_free_inst;
+ }
+ enc = crypto_spawn_skcipher_alg_common(&ictx->enc);
+
+ ictx->reqoff = 2 * auth->digestsize;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "krb5enc(%s,%s)", auth_base->cra_name,
+ enc->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "krb5enc(%s,%s)", auth_base->cra_driver_name,
+ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
+ auth_base->cra_priority;
+ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct krb5enc_ctx);
+
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
+ inst->alg.maxauthsize = auth->digestsize;
+
+ inst->alg.init = krb5enc_init_tfm;
+ inst->alg.exit = krb5enc_exit_tfm;
+
+ inst->alg.setkey = krb5enc_setkey;
+ inst->alg.encrypt = krb5enc_encrypt;
+ inst->alg.decrypt = krb5enc_decrypt;
+
+ inst->free = krb5enc_free;
+
+ err = aead_register_instance(tmpl, inst);
+ if (err) {
+ pr_err("ref failed\n");
+ goto err_free_inst;
+ }
+
+ return 0;
+
+err_free_inst:
+ krb5enc_free(inst);
+ return err;
+}
+
+static struct crypto_template crypto_krb5enc_tmpl = {
+ .name = "krb5enc",
+ .create = krb5enc_create,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_krb5enc_module_init(void)
+{
+ return crypto_register_template(&crypto_krb5enc_tmpl);
+}
+
+static void __exit crypto_krb5enc_module_exit(void)
+{
+ crypto_unregister_template(&crypto_krb5enc_tmpl);
+}
+
+module_init(crypto_krb5enc_module_init);
+module_exit(crypto_krb5enc_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple AEAD wrapper for Kerberos 5 RFC3961");
+MODULE_ALIAS_CRYPTO("krb5enc");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 8d59a66b6525..dd403b800513 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wsrc;
+ const be128 *wsrc;
be128 *wdst;
wsrc = w.src.virt.addr;
@@ -205,9 +205,9 @@ static int lrw_xor_tweak_post(struct skcipher_request *req)
return lrw_xor_tweak(req, true);
}
-static void lrw_crypt_done(struct crypto_async_request *areq, int err)
+static void lrw_crypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err) {
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
@@ -299,8 +299,8 @@ static void lrw_free_instance(struct skcipher_instance *inst)
static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
+ struct skcipher_alg_common *alg;
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
const char *cipher_name;
char ecb_name[CRYPTO_MAX_ALG_NAME];
u32 mask;
@@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -336,13 +336,13 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alg = crypto_skcipher_spawn_alg(spawn);
+ alg = crypto_spawn_skcipher_alg_common(spawn);
err = -EINVAL;
if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
goto err_free_inst;
- if (crypto_skcipher_alg_ivsize(alg))
+ if (alg->ivsize)
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
@@ -356,11 +356,11 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
- unsigned len;
+ if (!memcmp(cipher_name, "ecb(", 4)) {
+ int len;
- len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
- if (len < 2 || len >= sizeof(ecb_name))
+ len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+ if (len < 2)
goto err_free_inst;
if (ecb_name[len - 1] != ')')
@@ -382,10 +382,8 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
(__alignof__(be128) - 1);
inst->alg.ivsize = LRW_BLOCK_SIZE;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
- LRW_BLOCK_SIZE;
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
- LRW_BLOCK_SIZE;
+ inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE;
+ inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
@@ -422,7 +420,7 @@ static void __exit lrw_module_exit(void)
crypto_unregister_template(&lrw_tmpl);
}
-subsys_initcall(lrw_module_init);
+module_init(lrw_module_init);
module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
new file mode 100644
index 000000000000..c2e2c38b5aa8
--- /dev/null
+++ b/crypto/lskcipher.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linear symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <linux/cryptouser.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+#include "skcipher.h"
+
+static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_lskcipher, base);
+}
+
+static inline struct lskcipher_alg *__crypto_lskcipher_alg(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct lskcipher_alg, co.base);
+}
+
+static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+ struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
+ u8 *buffer, *alignbuffer;
+ unsigned long absize;
+ int ret;
+
+ absize = keylen + alignmask;
+ buffer = kmalloc(absize, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
+
+ alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ memcpy(alignbuffer, key, keylen);
+ ret = cipher->setkey(tfm, alignbuffer, keylen);
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+ struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
+
+ if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
+ return -EINVAL;
+
+ if ((unsigned long)key & alignmask)
+ return lskcipher_setkey_unaligned(tfm, key, keylen);
+ else
+ return cipher->setkey(tfm, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);
+
+static int crypto_lskcipher_crypt_unaligned(
+ struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
+ u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags))
+{
+ unsigned statesize = crypto_lskcipher_statesize(tfm);
+ unsigned ivsize = crypto_lskcipher_ivsize(tfm);
+ unsigned bs = crypto_lskcipher_blocksize(tfm);
+ unsigned cs = crypto_lskcipher_chunksize(tfm);
+ int err;
+ u8 *tiv;
+ u8 *p;
+
+ BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
+ MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);
+
+ tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!tiv)
+ return -ENOMEM;
+
+ memcpy(tiv, iv, ivsize + statesize);
+
+ p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ err = -ENOMEM;
+ if (!p)
+ goto out;
+
+ while (len >= bs) {
+ unsigned chunk = min((unsigned)PAGE_SIZE, len);
+
+ if (chunk > cs)
+ chunk &= ~(cs - 1);
+
+ memcpy(p, src, chunk);
+ err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+ if (err)
+ goto out;
+
+ memcpy(dst, p, chunk);
+ src += chunk;
+ dst += chunk;
+ len -= chunk;
+ }
+
+ err = len ? -EINVAL : 0;
+
+out:
+ memcpy(iv, tiv, ivsize + statesize);
+ kfree_sensitive(p);
+ kfree_sensitive(tiv);
+ return err;
+}
+
+static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv,
+ int (*crypt)(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst,
+ unsigned len, u8 *iv,
+ u32 flags))
+{
+ unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+
+ if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
+ alignmask)
+ return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
+ crypt);
+
+ return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+}
+
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv)
+{
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+
+ return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
+
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv)
+{
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+
+ return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
+
+static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
+ int (*crypt)(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst,
+ unsigned len, u8 *ivs,
+ u32 flags))
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
+ u8 *ivs = skcipher_request_ctx(req);
+ struct crypto_lskcipher *tfm = *ctx;
+ struct skcipher_walk walk;
+ unsigned ivsize;
+ u32 flags;
+ int err;
+
+ ivsize = crypto_lskcipher_ivsize(tfm);
+ ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
+ memcpy(ivs, req->iv, ivsize);
+
+ flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
+ flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
+
+ if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
+ flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, ivs,
+ flags & ~(walk.nbytes == walk.total ?
+ 0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
+ err = skcipher_walk_done(&walk, err);
+ flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
+ }
+
+ memcpy(req->iv, ivs, ivsize);
+
+ return err;
+}
+
+int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);
+
+ return crypto_lskcipher_crypt_sg(req, alg->encrypt);
+}
+
+int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);
+
+ return crypto_lskcipher_crypt_sg(req, alg->decrypt);
+}
+
+static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);
+
+ alg->exit(skcipher);
+}
+
+static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);
+
+ if (alg->exit)
+ skcipher->base.exit = crypto_lskcipher_exit_tfm;
+
+ if (alg->init)
+ return alg->init(skcipher);
+
+ return 0;
+}
+
+static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
+{
+ struct lskcipher_instance *skcipher =
+ container_of(inst, struct lskcipher_instance, s.base);
+
+ skcipher->free(skcipher);
+}
+
+static void __maybe_unused crypto_lskcipher_show(
+ struct seq_file *m, struct crypto_alg *alg)
+{
+ struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+
+ seq_printf(m, "type : lskcipher\n");
+ seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
+ seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize);
+ seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize);
+ seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize);
+ seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize);
+ seq_printf(m, "statesize : %u\n", skcipher->co.statesize);
+}
+
+static int __maybe_unused crypto_lskcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+ struct crypto_report_blkcipher rblkcipher;
+
+ memset(&rblkcipher, 0, sizeof(rblkcipher));
+
+ strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
+ strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = skcipher->co.min_keysize;
+ rblkcipher.max_keysize = skcipher->co.max_keysize;
+ rblkcipher.ivsize = skcipher->co.ivsize;
+
+ return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+ sizeof(rblkcipher), &rblkcipher);
+}
+
+static const struct crypto_type crypto_lskcipher_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_lskcipher_init_tfm,
+ .free = crypto_lskcipher_free_instance,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_lskcipher_show,
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
+ .report = crypto_lskcipher_report,
+#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_LSKCIPHER,
+ .tfmsize = offsetof(struct crypto_lskcipher, base),
+ .algsize = offsetof(struct lskcipher_alg, co.base),
+};
+
+static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_lskcipher(*ctx);
+}
+
+int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_lskcipher *skcipher;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
+ if (IS_ERR(skcipher)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(skcipher);
+ }
+
+ *ctx = skcipher;
+ tfm->exit = crypto_lskcipher_exit_tfm_sg;
+
+ return 0;
+}
+
+int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_lskcipher_type;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);
+
+struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);
+
+static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->co.base;
+ int err;
+
+ err = skcipher_prepare_alg_common(&alg->co);
+ if (err)
+ return err;
+
+ if (alg->co.chunksize & (alg->co.chunksize - 1))
+ return -EINVAL;
+
+ base->cra_type = &crypto_lskcipher_type;
+ base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;
+
+ return 0;
+}
+
+int crypto_register_lskcipher(struct lskcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->co.base;
+ int err;
+
+ err = lskcipher_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_lskcipher);
+
+void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
+{
+ crypto_unregister_alg(&alg->co.base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);
+
+int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_lskcipher(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_lskcipher(&algs[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_lskciphers);
+
+void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_lskcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);
+
+int lskcipher_register_instance(struct crypto_template *tmpl,
+ struct lskcipher_instance *inst)
+{
+ int err;
+
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
+ err = lskcipher_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(lskcipher_register_instance);
+
+static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);
+
+ crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ return crypto_lskcipher_setkey(cipher, key, keylen);
+}
+
+static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
+{
+ struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_lskcipher_spawn *spawn;
+ struct crypto_lskcipher *cipher;
+
+ spawn = lskcipher_instance_ctx(inst);
+ cipher = crypto_spawn_lskcipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ *ctx = cipher;
+ return 0;
+}
+
+static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ crypto_free_lskcipher(*ctx);
+}
+
+static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
+{
+ crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+/**
+ * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
+ *
+ * Allocate an lskcipher_instance for a simple block cipher mode of operation,
+ * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
+ * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
+ * alignmask, and priority are set from the underlying cipher but can be
+ * overridden if needed. The tfm context defaults to
+ * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
+ * ->exit() methods are installed.
+ *
+ * @tmpl: the template being instantiated
+ * @tb: the template parameters
+ *
+ * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
+ * needs to register the instance.
+ */
+struct lskcipher_instance *lskcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb)
+{
+ u32 mask;
+ struct lskcipher_instance *inst;
+ struct crypto_lskcipher_spawn *spawn;
+ char ecb_name[CRYPTO_MAX_ALG_NAME];
+ struct lskcipher_alg *cipher_alg;
+ const char *cipher_name;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
+ if (err)
+ return ERR_PTR(err);
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return ERR_CAST(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
+
+ spawn = lskcipher_instance_ctx(inst);
+ err = crypto_grab_lskcipher(spawn,
+ lskcipher_crypto_instance(inst),
+ cipher_name, 0, mask);
+
+ ecb_name[0] = 0;
+ if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
+ err = -ENAMETOOLONG;
+ if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_lskcipher(spawn,
+ lskcipher_crypto_instance(inst),
+ ecb_name, 0, mask);
+ }
+
+ if (err)
+ goto err_free_inst;
+
+ cipher_alg = crypto_lskcipher_spawn_alg(spawn);
+
+ err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
+ &cipher_alg->co.base);
+ if (err)
+ goto err_free_inst;
+
+ if (ecb_name[0]) {
+ int len;
+
+ err = -EINVAL;
+ len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
+ sizeof(ecb_name));
+ if (len < 2)
+ goto err_free_inst;
+
+ if (ecb_name[len - 1] != ')')
+ goto err_free_inst;
+
+ ecb_name[len - 1] = 0;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, ecb_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (strcmp(ecb_name, cipher_name) &&
+ snprintf(inst->alg.co.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, cipher_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+ } else {
+ /* Don't allow nesting. */
+ err = -ELOOP;
+ if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
+ goto err_free_inst;
+ }
+
+ err = -EINVAL;
+ if (cipher_alg->co.ivsize)
+ goto err_free_inst;
+
+ inst->free = lskcipher_free_instance_simple;
+
+ /* Default algorithm properties, can be overridden */
+ inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
+ inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
+ inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
+ inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
+ inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
+ inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
+ inst->alg.co.statesize = cipher_alg->co.statesize;
+
+ /* Use struct crypto_lskcipher * by default, can be overridden */
+ inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
+ inst->alg.setkey = lskcipher_setkey_simple;
+ inst->alg.init = lskcipher_init_tfm_simple;
+ inst->alg.exit = lskcipher_exit_tfm_simple;
+
+ return inst;
+
+err_free_inst:
+ lskcipher_free_instance_simple(inst);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
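
Usage sketch assembled from the API added above (crypto_alloc_lskcipher,
crypto_lskcipher_setkey, crypto_lskcipher_encrypt): flat virtual buffers and
an explicit IV, with no request object. The algorithm name is an assumption,
and len is assumed to be a whole number of blocks.

#include <crypto/skcipher.h>
#include <linux/err.h>

static int lskcipher_demo(const u8 *key, unsigned int keylen,
			  u8 *buf, unsigned int len)
{
	struct crypto_lskcipher *tfm;
	u8 iv[16] = {};		/* sized for a 16-byte-block mode */
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!err)	/* encrypt in place; IV state is carried in iv[] */
		err = crypto_lskcipher_encrypt(tfm, buf, buf, len, iv);

	crypto_free_lskcipher(tfm);
	return err;
}
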
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 0606f8862e78..57b713516aef 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -12,11 +12,7 @@
#include <linux/lz4.h>
#include <crypto/internal/scompress.h>
-struct lz4_ctx {
- void *lz4_comp_mem;
-};
-
-static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+static void *lz4_alloc_ctx(void)
{
void *ctx;
@@ -27,29 +23,11 @@ static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lz4_init(struct crypto_tfm *tfm)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
- if (IS_ERR(ctx->lz4_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lz4_free_ctx(void *ctx)
{
vfree(ctx);
}
-static void lz4_exit(struct crypto_tfm *tfm)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lz4_free_ctx(NULL, ctx->lz4_comp_mem);
-}
-
static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -70,14 +48,6 @@ static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
}
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
-}
-
static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -97,29 +67,11 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
}
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
-}
-
-static struct crypto_alg alg_lz4 = {
- .cra_name = "lz4",
- .cra_driver_name = "lz4-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lz4_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lz4_init,
- .cra_exit = lz4_exit,
- .cra_u = { .compress = {
- .coa_compress = lz4_compress_crypto,
- .coa_decompress = lz4_decompress_crypto } }
-};
-
static struct scomp_alg scomp = {
- .alloc_ctx = lz4_alloc_ctx,
- .free_ctx = lz4_free_ctx,
+ .streams = {
+ .alloc_ctx = lz4_alloc_ctx,
+ .free_ctx = lz4_free_ctx,
+ },
.compress = lz4_scompress,
.decompress = lz4_sdecompress,
.base = {
@@ -131,28 +83,15 @@ static struct scomp_alg scomp = {
static int __init lz4_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg_lz4);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg_lz4);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lz4_mod_fini(void)
{
- crypto_unregister_alg(&alg_lz4);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4_mod_init);
+module_init(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
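
With the legacy CRYPTO_ALG_TYPE_COMPRESS entry points deleted, callers reach
lz4 through the acomp interface, which wraps the remaining scomp
implementation. A minimal sketch, assuming a synchronous (scomp-backed)
transform so no completion wait is needed:

#include <crypto/acompress.h>
#include <linux/err.h>

static int lz4_acomp_demo(struct scatterlist *src, unsigned int slen,
			  struct scatterlist *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("lz4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_params(req, src, dst, slen, dlen);
	err = crypto_acomp_compress(req);	/* 0 on success; req->dlen updated */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}
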
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index d7cc94aa2fcf..bb84f8a68cb5 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -4,18 +4,13 @@
*
* Copyright (c) 2013 Chanho Min <chanho.min@lge.com>
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
-#include <crypto/internal/scompress.h>
-
-struct lz4hc_ctx {
- void *lz4hc_comp_mem;
-};
-static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+static void *lz4hc_alloc_ctx(void)
{
void *ctx;
@@ -26,29 +21,11 @@ static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lz4hc_init(struct crypto_tfm *tfm)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
- if (IS_ERR(ctx->lz4hc_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lz4hc_free_ctx(void *ctx)
{
vfree(ctx);
}
-static void lz4hc_exit(struct crypto_tfm *tfm)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
-}
-
static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -69,16 +46,6 @@ static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
}
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lz4hc_compress_crypto(src, slen, dst, dlen,
- ctx->lz4hc_comp_mem);
-}
-
static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -98,29 +65,11 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
}
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
-}
-
-static struct crypto_alg alg_lz4hc = {
- .cra_name = "lz4hc",
- .cra_driver_name = "lz4hc-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lz4hc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lz4hc_init,
- .cra_exit = lz4hc_exit,
- .cra_u = { .compress = {
- .coa_compress = lz4hc_compress_crypto,
- .coa_decompress = lz4hc_decompress_crypto } }
-};
-
static struct scomp_alg scomp = {
- .alloc_ctx = lz4hc_alloc_ctx,
- .free_ctx = lz4hc_free_ctx,
+ .streams = {
+ .alloc_ctx = lz4hc_alloc_ctx,
+ .free_ctx = lz4hc_free_ctx,
+ },
.compress = lz4hc_scompress,
.decompress = lz4hc_sdecompress,
.base = {
@@ -132,28 +81,15 @@ static struct scomp_alg scomp = {
static int __init lz4hc_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg_lz4hc);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg_lz4hc);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lz4hc_mod_fini(void)
{
- crypto_unregister_alg(&alg_lz4hc);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4hc_mod_init);
+module_init(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index 0631d975bfac..794e7ec49536 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -3,19 +3,13 @@
* Cryptographic API.
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
#include <linux/lzo.h>
-#include <crypto/internal/scompress.h>
-
-struct lzorle_ctx {
- void *lzorle_comp_mem;
-};
+#include <linux/module.h>
+#include <linux/slab.h>
-static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
+static void *lzorle_alloc_ctx(void)
{
void *ctx;
@@ -26,36 +20,18 @@ static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lzorle_init(struct crypto_tfm *tfm)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL);
- if (IS_ERR(ctx->lzorle_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lzorle_free_ctx(void *ctx)
{
kvfree(ctx);
}
-static void lzorle_exit(struct crypto_tfm *tfm)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lzorle_free_ctx(NULL, ctx->lzorle_comp_mem);
-}
-
static int __lzorle_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
+ err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,14 +40,6 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem);
-}
-
static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -94,12 +62,6 @@ static int __lzorle_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- return __lzorle_decompress(src, slen, dst, dlen);
-}
-
static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -107,22 +69,11 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lzorle_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "lzo-rle",
- .cra_driver_name = "lzo-rle-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lzorle_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lzorle_init,
- .cra_exit = lzorle_exit,
- .cra_u = { .compress = {
- .coa_compress = lzorle_compress,
- .coa_decompress = lzorle_decompress } }
-};
-
static struct scomp_alg scomp = {
- .alloc_ctx = lzorle_alloc_ctx,
- .free_ctx = lzorle_free_ctx,
+ .streams = {
+ .alloc_ctx = lzorle_alloc_ctx,
+ .free_ctx = lzorle_free_ctx,
+ },
.compress = lzorle_scompress,
.decompress = lzorle_sdecompress,
.base = {
@@ -134,28 +85,15 @@ static struct scomp_alg scomp = {
static int __init lzorle_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lzorle_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzorle_mod_init);
+module_init(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index ebda132dd22b..d43242b24b4e 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -3,19 +3,13 @@
* Cryptographic API.
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
#include <linux/lzo.h>
-#include <crypto/internal/scompress.h>
-
-struct lzo_ctx {
- void *lzo_comp_mem;
-};
+#include <linux/module.h>
+#include <linux/slab.h>
-static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+static void *lzo_alloc_ctx(void)
{
void *ctx;
@@ -26,36 +20,18 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lzo_init(struct crypto_tfm *tfm)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
- if (IS_ERR(ctx->lzo_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lzo_free_ctx(void *ctx)
{
kvfree(ctx);
}
-static void lzo_exit(struct crypto_tfm *tfm)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lzo_free_ctx(NULL, ctx->lzo_comp_mem);
-}
-
static int __lzo_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
+ err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,14 +40,6 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
-}
-
static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -94,12 +62,6 @@ static int __lzo_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- return __lzo_decompress(src, slen, dst, dlen);
-}
-
static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -107,22 +69,11 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lzo_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "lzo",
- .cra_driver_name = "lzo-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lzo_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lzo_init,
- .cra_exit = lzo_exit,
- .cra_u = { .compress = {
- .coa_compress = lzo_compress,
- .coa_decompress = lzo_decompress } }
-};
-
static struct scomp_alg scomp = {
- .alloc_ctx = lzo_alloc_ctx,
- .free_ctx = lzo_free_ctx,
+ .streams = {
+ .alloc_ctx = lzo_alloc_ctx,
+ .free_ctx = lzo_free_ctx,
+ },
.compress = lzo_scompress,
.decompress = lzo_sdecompress,
.base = {
@@ -134,28 +85,15 @@ static struct scomp_alg scomp = {
static int __init lzo_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lzo_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzo_mod_init);
+module_init(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
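
The lzo and lzo-rle conversions also switch to the bounds-checked _safe
compressors. A sketch of the call, with the signature as used in the hunks
above: unlike lzo1x_1_compress(), the _safe variant refuses to write past
*dst_len and returns LZO_E_OUTPUT_OVERRUN instead.

#include <linux/lzo.h>
#include <linux/vmalloc.h>

/* Hypothetical helper showing the bounds-checked compress call. */
static int lzo_safe_demo(const u8 *src, size_t src_len,
			 u8 *dst, size_t *dst_len)
{
	void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	int err;

	if (!wrkmem)
		return -ENOMEM;

	err = lzo1x_1_compress_safe(src, src_len, dst, dst_len, wrkmem);
	vfree(wrkmem);
	return err == LZO_E_OK ? 0 : -EINVAL;
}
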
diff --git a/crypto/md4.c b/crypto/md4.c
index 2e7f2f319f95..55bf47e23c13 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md4_mod_init);
+module_init(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 72c0c46fb5ee..c167d203c710 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -1,27 +1,62 @@
-/*
- * Cryptographic API.
- *
- * MD5 Message Digest Algorithm (RFC1321).
- *
- * Derived from cryptoapi implementation, originally based on the
- * public domain implementation written by Colin Plumb in 1993.
- *
- * Copyright (c) Cryptoapi developers.
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for MD5 and HMAC-MD5
*
+ * Copyright 2025 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
+
+/*
+ * Export and import functions. crypto_shash wants a particular format that
+ * matches that used by some legacy drivers. It is currently the same as the
+ * library MD5 context, except the value in bytecount must be block-aligned and
+ * the remainder must be stored in an extra u8 appended to the struct.
+ */
+
+#define MD5_SHASH_STATE_SIZE (sizeof(struct md5_ctx) + 1)
+static_assert(sizeof(struct md5_ctx) == sizeof(struct md5_state));
+static_assert(offsetof(struct md5_ctx, state) == offsetof(struct md5_state, hash));
+static_assert(offsetof(struct md5_ctx, bytecount) == offsetof(struct md5_state, byte_count));
+static_assert(offsetof(struct md5_ctx, buf) == offsetof(struct md5_state, block));
+
+static int __crypto_md5_export(const struct md5_ctx *ctx0, void *out)
+{
+ struct md5_ctx ctx = *ctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = ctx.bytecount % MD5_BLOCK_SIZE;
+ ctx.bytecount -= partial;
+ memcpy(p, &ctx, sizeof(ctx));
+ p += sizeof(ctx);
+ *p = partial;
+ return 0;
+}
+
+static int __crypto_md5_import(struct md5_ctx *ctx, const void *in)
+{
+ const u8 *p = in;
+
+ memcpy(ctx, p, sizeof(*ctx));
+ p += sizeof(*ctx);
+ ctx->bytecount += *p;
+ return 0;
+}
+
+static int __crypto_md5_export_core(const struct md5_ctx *ctx, void *out)
+{
+ memcpy(out, ctx, offsetof(struct md5_ctx, buf));
+ return 0;
+}
+
+static int __crypto_md5_import_core(struct md5_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct md5_ctx, buf));
+ return 0;
+}
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
@@ -29,222 +64,173 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, in, s) \
- (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-
-static void md5_transform(__u32 *hash, __u32 const *in)
-{
- u32 a, b, c, d;
-
- a = hash[0];
- b = hash[1];
- c = hash[2];
- d = hash[3];
-
- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
- hash[0] += a;
- hash[1] += b;
- hash[2] += c;
- hash[3] += d;
-}
-
-static inline void md5_transform_helper(struct md5_state *ctx)
-{
- le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
- md5_transform(ctx->hash, ctx->block);
-}
-
-static int md5_init(struct shash_desc *desc)
-{
- struct md5_state *mctx = shash_desc_ctx(desc);
-
- mctx->hash[0] = MD5_H0;
- mctx->hash[1] = MD5_H1;
- mctx->hash[2] = MD5_H2;
- mctx->hash[3] = MD5_H3;
- mctx->byte_count = 0;
+#define MD5_CTX(desc) ((struct md5_ctx *)shash_desc_ctx(desc))
+static int crypto_md5_init(struct shash_desc *desc)
+{
+ md5_init(MD5_CTX(desc));
return 0;
}
-static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+static int crypto_md5_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
{
- struct md5_state *mctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ md5_update(MD5_CTX(desc), data, len);
+ return 0;
+}
- mctx->byte_count += len;
+static int crypto_md5_final(struct shash_desc *desc, u8 *out)
+{
+ md5_final(MD5_CTX(desc), out);
+ return 0;
+}
- if (avail > len) {
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, len);
- return 0;
- }
+static int crypto_md5_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ md5(data, len, out);
+ return 0;
+}
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, avail);
+static int crypto_md5_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_md5_export(MD5_CTX(desc), out);
+}
- md5_transform_helper(mctx);
- data += avail;
- len -= avail;
+static int crypto_md5_import(struct shash_desc *desc, const void *in)
+{
+ return __crypto_md5_import(MD5_CTX(desc), in);
+}
- while (len >= sizeof(mctx->block)) {
- memcpy(mctx->block, data, sizeof(mctx->block));
- md5_transform_helper(mctx);
- data += sizeof(mctx->block);
- len -= sizeof(mctx->block);
- }
+static int crypto_md5_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_md5_export_core(MD5_CTX(desc), out);
+}
- memcpy(mctx->block, data, len);
+static int crypto_md5_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_md5_import_core(MD5_CTX(desc), in);
+}
+
+#define HMAC_MD5_KEY(tfm) ((struct hmac_md5_key *)crypto_shash_ctx(tfm))
+#define HMAC_MD5_CTX(desc) ((struct hmac_md5_ctx *)shash_desc_ctx(desc))
+static int crypto_hmac_md5_setkey(struct crypto_shash *tfm,
+ const u8 *raw_key, unsigned int keylen)
+{
+ hmac_md5_preparekey(HMAC_MD5_KEY(tfm), raw_key, keylen);
return 0;
}
-static int md5_final(struct shash_desc *desc, u8 *out)
+static int crypto_hmac_md5_init(struct shash_desc *desc)
{
- struct md5_state *mctx = shash_desc_ctx(desc);
- const unsigned int offset = mctx->byte_count & 0x3f;
- char *p = (char *)mctx->block + offset;
- int padding = 56 - (offset + 1);
+ hmac_md5_init(HMAC_MD5_CTX(desc), HMAC_MD5_KEY(desc->tfm));
+ return 0;
+}
- *p++ = 0x80;
- if (padding < 0) {
- memset(p, 0x00, padding + sizeof (u64));
- md5_transform_helper(mctx);
- p = (char *)mctx->block;
- padding = 56;
- }
+static int crypto_hmac_md5_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ hmac_md5_update(HMAC_MD5_CTX(desc), data, len);
+ return 0;
+}
- memset(p, 0, padding);
- mctx->block[14] = mctx->byte_count << 3;
- mctx->block[15] = mctx->byte_count >> 29;
- le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
- sizeof(u64)) / sizeof(u32));
- md5_transform(mctx->hash, mctx->block);
- cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
- memcpy(out, mctx->hash, sizeof(mctx->hash));
- memset(mctx, 0, sizeof(*mctx));
+static int crypto_hmac_md5_final(struct shash_desc *desc, u8 *out)
+{
+ hmac_md5_final(HMAC_MD5_CTX(desc), out);
+ return 0;
+}
+static int crypto_hmac_md5_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ hmac_md5(HMAC_MD5_KEY(desc->tfm), data, len, out);
return 0;
}
-static int md5_export(struct shash_desc *desc, void *out)
+static int crypto_hmac_md5_export(struct shash_desc *desc, void *out)
{
- struct md5_state *ctx = shash_desc_ctx(desc);
+ return __crypto_md5_export(&HMAC_MD5_CTX(desc)->hash_ctx, out);
+}
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
+static int crypto_hmac_md5_import(struct shash_desc *desc, const void *in)
+{
+ struct hmac_md5_ctx *ctx = HMAC_MD5_CTX(desc);
+
+ ctx->ostate = HMAC_MD5_KEY(desc->tfm)->ostate;
+ return __crypto_md5_import(&ctx->hash_ctx, in);
}
-static int md5_import(struct shash_desc *desc, const void *in)
+static int crypto_hmac_md5_export_core(struct shash_desc *desc, void *out)
{
- struct md5_state *ctx = shash_desc_ctx(desc);
+ return __crypto_md5_export_core(&HMAC_MD5_CTX(desc)->hash_ctx, out);
+}
- memcpy(ctx, in, sizeof(*ctx));
- return 0;
+static int crypto_hmac_md5_import_core(struct shash_desc *desc, const void *in)
+{
+ struct hmac_md5_ctx *ctx = HMAC_MD5_CTX(desc);
+
+ ctx->ostate = HMAC_MD5_KEY(desc->tfm)->ostate;
+ return __crypto_md5_import_core(&ctx->hash_ctx, in);
}
-static struct shash_alg alg = {
- .digestsize = MD5_DIGEST_SIZE,
- .init = md5_init,
- .update = md5_update,
- .final = md5_final,
- .export = md5_export,
- .import = md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
- .base = {
- .cra_name = "md5",
- .cra_driver_name = "md5-generic",
- .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "md5",
+ .base.cra_driver_name = "md5-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = MD5_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = MD5_DIGEST_SIZE,
+ .init = crypto_md5_init,
+ .update = crypto_md5_update,
+ .final = crypto_md5_final,
+ .digest = crypto_md5_digest,
+ .export = crypto_md5_export,
+ .import = crypto_md5_import,
+ .export_core = crypto_md5_export_core,
+ .import_core = crypto_md5_import_core,
+ .descsize = sizeof(struct md5_ctx),
+ .statesize = MD5_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "hmac(md5)",
+ .base.cra_driver_name = "hmac-md5-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = MD5_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct hmac_md5_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = MD5_DIGEST_SIZE,
+ .setkey = crypto_hmac_md5_setkey,
+ .init = crypto_hmac_md5_init,
+ .update = crypto_hmac_md5_update,
+ .final = crypto_hmac_md5_final,
+ .digest = crypto_hmac_md5_digest,
+ .export = crypto_hmac_md5_export,
+ .import = crypto_hmac_md5_import,
+ .export_core = crypto_hmac_md5_export_core,
+ .import_core = crypto_hmac_md5_import_core,
+ .descsize = sizeof(struct hmac_md5_ctx),
+ .statesize = MD5_SHASH_STATE_SIZE,
+ },
};
-static int __init md5_mod_init(void)
+static int __init crypto_md5_mod_init(void)
{
- return crypto_register_shash(&alg);
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
+module_init(crypto_md5_mod_init);
-static void __exit md5_mod_fini(void)
+static void __exit crypto_md5_mod_exit(void)
{
- crypto_unregister_shash(&alg);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-
-subsys_initcall(md5_mod_init);
-module_exit(md5_mod_fini);
+module_exit(crypto_md5_mod_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
+MODULE_DESCRIPTION("Crypto API support for MD5 and HMAC-MD5");
+
MODULE_ALIAS_CRYPTO("md5");
+MODULE_ALIAS_CRYPTO("md5-lib");
+MODULE_ALIAS_CRYPTO("hmac(md5)");
+MODULE_ALIAS_CRYPTO("hmac-md5-lib");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index f4c31049601c..69ad35f524d7 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -7,7 +7,7 @@
* Copyright (c) 2004 Jouni Malinen <j@w1.fi>
*/
#include <crypto/internal/hash.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
@@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void)
}
-subsys_initcall(michael_mic_init);
+module_init(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index 8a3006c3b51b..2b648615b5ec 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -30,7 +30,7 @@
* (https://cr.yp.to/mac/poly1305-20050329.pdf)
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
@@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void)
crypto_unregister_shash(&nhpoly1305_alg);
}
-subsys_initcall(nhpoly1305_mod_init);
+module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/ofb.c b/crypto/ofb.c
deleted file mode 100644
index b630fdecceee..000000000000
--- a/crypto/ofb.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * OFB: Output FeedBack mode
- *
- * Copyright (C) 2018 ARM Limited or its affiliates.
- * All rights reserved.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-static int crypto_ofb_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- const unsigned int bsize = crypto_cipher_blocksize(cipher);
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes >= bsize) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- u8 * const iv = walk.iv;
- unsigned int nbytes = walk.nbytes;
-
- do {
- crypto_cipher_encrypt_one(cipher, iv, iv);
- crypto_xor_cpy(dst, src, iv, bsize);
- dst += bsize;
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- if (walk.nbytes) {
- crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
- crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
- return err;
-}
-
-static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
- int err;
-
- inst = skcipher_alloc_instance_simple(tmpl, tb);
- if (IS_ERR(inst))
- return PTR_ERR(inst);
-
- alg = skcipher_ialg_simple(inst);
-
- /* OFB mode is a stream cipher. */
- inst->alg.base.cra_blocksize = 1;
-
- /*
- * To simplify the implementation, configure the skcipher walk to only
- * give a partial block at the very end, never earlier.
- */
- inst->alg.chunksize = alg->cra_blocksize;
-
- inst->alg.encrypt = crypto_ofb_crypt;
- inst->alg.decrypt = crypto_ofb_crypt;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err)
- inst->free(inst);
-
- return err;
-}
-
-static struct crypto_template crypto_ofb_tmpl = {
- .name = "ofb",
- .create = crypto_ofb_create,
- .module = THIS_MODULE,
-};
-
-static int __init crypto_ofb_module_init(void)
-{
- return crypto_register_template(&crypto_ofb_tmpl);
-}
-
-static void __exit crypto_ofb_module_exit(void)
-{
- crypto_unregister_template(&crypto_ofb_tmpl);
-}
-
-subsys_initcall(crypto_ofb_module_init);
-module_exit(crypto_ofb_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("OFB block cipher mode of operation");
-MODULE_ALIAS_CRYPTO("ofb");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 7030f59e46b6..d092717ea4fc 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
@@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
- memcpy(tmpbuf, src, bsize);
- crypto_xor(iv, src, bsize);
- crypto_cipher_encrypt_one(tfm, src, iv);
- crypto_xor_cpy(iv, tmpbuf, src, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_xor(iv, dst, bsize);
+ crypto_cipher_encrypt_one(tfm, dst, iv);
+ crypto_xor_cpy(iv, tmpbuf, dst, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -71,7 +71,7 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while (walk.nbytes) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
cipher);
@@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
@@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
- memcpy(tmpbuf, src, bsize);
- crypto_cipher_decrypt_one(tfm, src, src);
- crypto_xor(src, iv, bsize);
- crypto_xor_cpy(iv, src, tmpbuf, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_cipher_decrypt_one(tfm, dst, dst);
+ crypto_xor(dst, iv, bsize);
+ crypto_xor_cpy(iv, dst, tmpbuf, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -138,7 +138,7 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while (walk.nbytes) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
cipher);
@@ -186,10 +186,10 @@ static void __exit crypto_pcbc_module_exit(void)
crypto_unregister_template(&crypto_pcbc_tmpl);
}
-subsys_initcall(crypto_pcbc_module_init);
+module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("pcbc");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
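The in-place hunks above rename src to dst because both aliases point at the same buffer; the tmpbuf copy is what keeps the pre-encryption plaintext available for the chaining step. A self-contained sketch of PCBC encryption with the same structure, taking the block cipher as a hypothetical callback:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLK 16

/* PCBC encrypt in place: C_i = E_K(P_i ^ IV_i), IV_{i+1} = P_i ^ C_i.
 * encrypt() is a hypothetical block-cipher callback (out, in). */
static void pcbc_encrypt_inplace(uint8_t *dst, size_t nblocks, uint8_t iv[BLK],
				 void (*encrypt)(uint8_t *, const uint8_t *))
{
	uint8_t tmp[BLK];
	size_t i;

	while (nblocks--) {
		memcpy(tmp, dst, BLK);		 /* save P_i before overwrite */
		for (i = 0; i < BLK; i++)
			iv[i] ^= dst[i];	 /* iv = P_i ^ IV_i */
		encrypt(dst, iv);		 /* dst becomes C_i */
		for (i = 0; i < BLK; i++)
			iv[i] = tmp[i] ^ dst[i]; /* next IV = P_i ^ C_i */
		dst += BLK;
	}
}

Because PCBC chains IV_{i+1} = P_i ^ C_i, once the block has been encrypted in place the saved copy is the only remaining source of P_i.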
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 9d10b846ccf7..c3a9d4f2995c 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -63,9 +63,9 @@ static void pcrypt_aead_serial(struct padata_priv *padata)
aead_request_complete(req->base.data, padata->info);
}
-static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
+static void pcrypt_aead_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct pcrypt_request *preq = aead_request_ctx(req);
struct padata_priv *padata = pcrypt_request_padata(preq);
@@ -117,6 +117,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
+ if (err == -EBUSY) {
+ /* try non-parallel mode */
+ return crypto_aead_encrypt(creq);
+ }
return err;
}
@@ -164,13 +168,17 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
+ if (err == -EBUSY) {
+ /* try non-parallel mode */
+ return crypto_aead_decrypt(creq);
+ }
return err;
}
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
- int cpu, cpu_index;
+ int cpu_index;
struct aead_instance *inst = aead_alg_instance(tfm);
struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -179,10 +187,7 @@ static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
cpumask_weight(cpu_online_mask);
- ctx->cb_cpu = cpumask_first(cpu_online_mask);
- for (cpu = 0; cpu < cpu_index; cpu++)
- ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
-
+ ctx->cb_cpu = cpumask_nth(cpu_index, cpu_online_mask);
cipher = crypto_spawn_aead(&ictx->spawn);
if (IS_ERR(cipher))
@@ -373,7 +378,7 @@ static void __exit pcrypt_exit(void)
kset_unregister(pcrypt_kset);
}
-subsys_initcall(pcrypt_init);
+module_init(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
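The new -EBUSY branches above let pcrypt degrade gracefully to a synchronous call when the padata queue cannot accept more work. A hedged userspace sketch of that dispatch shape, with submit_parallel() and do_sync() as hypothetical stand-ins for padata_do_parallel() and the direct crypto call:

#include <errno.h>

struct job { int payload; };

/* Hypothetical stand-ins for the parallel submit and the direct path. */
static int submit_parallel(struct job *j) { (void)j; return -EBUSY; }
static int do_sync(struct job *j) { (void)j; return 0; }

static int dispatch(struct job *j)
{
	int err = submit_parallel(j);

	if (!err)
		return -EINPROGRESS;	/* completion callback runs later */
	if (err == -EBUSY)
		return do_sync(j);	/* queue full: fall back inline */
	return err;
}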
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
deleted file mode 100644
index 94af47eb6fa6..000000000000
--- a/crypto/poly1305_generic.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Poly1305 authenticator algorithm, RFC7539
- *
- * Copyright (C) 2015 Martin Willi
- *
- * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/unaligned.h>
-
-static int crypto_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(&dctx->core_r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = 2;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
-
-static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int srclen)
-{
- unsigned int datalen;
-
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
- }
-
- poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
- srclen / POLY1305_BLOCK_SIZE, 1);
-}
-
-static int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- poly1305_blocks(dctx, src, srclen);
- src += srclen - (srclen % POLY1305_BLOCK_SIZE);
- srclen %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-
- return 0;
-}
-
-static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_generic(dctx, dst);
- return 0;
-}
-
-static struct shash_alg poly1305_alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_init,
- .update = crypto_poly1305_update,
- .final = crypto_poly1305_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-generic",
- .cra_priority = 100,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_mod_init(void)
-{
- return crypto_register_shash(&poly1305_alg);
-}
-
-static void __exit poly1305_mod_exit(void)
-{
- crypto_unregister_shash(&poly1305_alg);
-}
-
-subsys_initcall(poly1305_mod_init);
-module_exit(poly1305_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("Poly1305 authenticator");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-generic");
diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
deleted file mode 100644
index 16bfa6925b31..000000000000
--- a/crypto/polyval-generic.c
+++ /dev/null
@@ -1,245 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * POLYVAL: hash function for HCTR2.
- *
- * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
- * Copyright (c) 2009 Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- * Copyright 2021 Google LLC
- */
-
-/*
- * Code based on crypto/ghash-generic.c
- *
- * POLYVAL is a keyed hash function similar to GHASH. POLYVAL uses a different
- * modulus for finite field multiplication which makes hardware accelerated
- * implementations on little-endian machines faster. POLYVAL is used in the
- * kernel to implement HCTR2, but was originally specified for AES-GCM-SIV
- * (RFC 8452).
- *
- * For more information see:
- * Length-preserving encryption with HCTR2:
- * https://eprint.iacr.org/2021/1441.pdf
- * AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption:
- * https://datatracker.ietf.org/doc/html/rfc8452
- *
- * Like GHASH, POLYVAL is not a cryptographic hash function and should
- * not be used outside of crypto modes explicitly designed to use POLYVAL.
- *
- * This implementation uses a convenient trick involving the GHASH and POLYVAL
- * fields. This trick allows multiplication in the POLYVAL field to be
- * implemented by using multiplication in the GHASH field as a subroutine. An
- * element of the POLYVAL field can be converted to an element of the GHASH
- * field by computing x*REVERSE(a), where REVERSE reverses the byte-ordering of
- * a. Similarly, an element of the GHASH field can be converted back to the
- * POLYVAL field by computing REVERSE(x^{-1}*a). For more information, see:
- * https://datatracker.ietf.org/doc/html/rfc8452#appendix-A
- *
- * By using this trick, we do not need to implement the POLYVAL field for the
- * generic implementation.
- *
- * Warning: this generic implementation is not intended to be used in practice
- * and is not constant time. For practical use, a hardware accelerated
- * implementation of POLYVAL should be used instead.
- *
- */
-
-#include <asm/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/polyval.h>
-#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-struct polyval_tfm_ctx {
- struct gf128mul_4k *gf128;
-};
-
-struct polyval_desc_ctx {
- union {
- u8 buffer[POLYVAL_BLOCK_SIZE];
- be128 buffer128;
- };
- u32 bytes;
-};
-
-static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
- const u8 src[POLYVAL_BLOCK_SIZE])
-{
- u64 a = get_unaligned((const u64 *)&src[0]);
- u64 b = get_unaligned((const u64 *)&src[8]);
-
- put_unaligned(swab64(a), (u64 *)&dst[8]);
- put_unaligned(swab64(b), (u64 *)&dst[0]);
-}
-
-/*
- * Performs multiplication in the POLYVAL field using the GHASH field as a
- * subroutine. This function is used as a fallback for hardware accelerated
- * implementations when simd registers are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation for finite field multiplication.
- */
-void polyval_mul_non4k(u8 *op1, const u8 *op2)
-{
- be128 a, b;
-
- // Assume one argument is in Montgomery form and one is not.
- copy_and_reverse((u8 *)&a, op1);
- copy_and_reverse((u8 *)&b, op2);
- gf128mul_x_lle(&a, &a);
- gf128mul_lle(&a, &b);
- copy_and_reverse(op1, (u8 *)&a);
-}
-EXPORT_SYMBOL_GPL(polyval_mul_non4k);
-
-/*
- * Perform a POLYVAL update using non4k multiplication. This function is used
- * as a fallback for hardware accelerated implementations when simd registers
- * are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation of finite field multiplication.
- */
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator)
-{
- while (nblocks--) {
- crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE);
- polyval_mul_non4k(accumulator, key);
- in += POLYVAL_BLOCK_SIZE;
- }
-}
-EXPORT_SYMBOL_GPL(polyval_update_non4k);
-
-static int polyval_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
- be128 k;
-
- if (keylen != POLYVAL_BLOCK_SIZE)
- return -EINVAL;
-
- gf128mul_free_4k(ctx->gf128);
-
- BUILD_BUG_ON(sizeof(k) != POLYVAL_BLOCK_SIZE);
- copy_and_reverse((u8 *)&k, key);
- gf128mul_x_lle(&k, &k);
-
- ctx->gf128 = gf128mul_init_4k_lle(&k);
- memzero_explicit(&k, POLYVAL_BLOCK_SIZE);
-
- if (!ctx->gf128)
- return -ENOMEM;
-
- return 0;
-}
-
-static int polyval_init(struct shash_desc *desc)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-
- memset(dctx, 0, sizeof(*dctx));
-
- return 0;
-}
-
-static int polyval_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
- const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- u8 *pos;
- u8 tmp[POLYVAL_BLOCK_SIZE];
- int n;
-
- if (dctx->bytes) {
- n = min(srclen, dctx->bytes);
- pos = dctx->buffer + dctx->bytes - 1;
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos-- ^= *src++;
-
- if (!dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- }
-
- while (srclen >= POLYVAL_BLOCK_SIZE) {
- copy_and_reverse(tmp, src);
- crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- src += POLYVAL_BLOCK_SIZE;
- srclen -= POLYVAL_BLOCK_SIZE;
- }
-
- if (srclen) {
- dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
- pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1;
- while (srclen--)
- *pos-- ^= *src++;
- }
-
- return 0;
-}
-
-static int polyval_final(struct shash_desc *desc, u8 *dst)
-{
- struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
- const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
-
- if (dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- copy_and_reverse(dst, dctx->buffer);
- return 0;
-}
-
-static void polyval_exit_tfm(struct crypto_tfm *tfm)
-{
- struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
-
- gf128mul_free_4k(ctx->gf128);
-}
-
-static struct shash_alg polyval_alg = {
- .digestsize = POLYVAL_DIGEST_SIZE,
- .init = polyval_init,
- .update = polyval_update,
- .final = polyval_final,
- .setkey = polyval_setkey,
- .descsize = sizeof(struct polyval_desc_ctx),
- .base = {
- .cra_name = "polyval",
- .cra_driver_name = "polyval-generic",
- .cra_priority = 100,
- .cra_blocksize = POLYVAL_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct polyval_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_exit = polyval_exit_tfm,
- },
-};
-
-static int __init polyval_mod_init(void)
-{
- return crypto_register_shash(&polyval_alg);
-}
-
-static void __exit polyval_mod_exit(void)
-{
- crypto_unregister_shash(&polyval_alg);
-}
-
-subsys_initcall(polyval_mod_init);
-module_exit(polyval_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("POLYVAL hash function");
-MODULE_ALIAS_CRYPTO("polyval");
-MODULE_ALIAS_CRYPTO("polyval-generic");
diff --git a/crypto/proc.c b/crypto/proc.c
index 12fccb9c5205..82f15b967e85 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -11,6 +11,7 @@
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
#include <linux/module.h> /* for module_name() */
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
@@ -46,8 +47,10 @@ static int c_show(struct seq_file *m, void *p)
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
seq_printf(m, "internal : %s\n",
- (alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
- "yes" : "no");
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL));
+ if (fips_enabled)
+ seq_printf(m, "fips : %s\n",
+ str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL));
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
seq_printf(m, "type : larval\n");
@@ -69,9 +72,6 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- seq_printf(m, "type : compression\n");
- break;
default:
seq_printf(m, "type : unknown\n");
break;
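str_yes_no() and str_no_yes(), used in the hunk above, come from the kernel's <linux/string_choices.h>. Userspace equivalents are one-liners, shown here mainly to make the flag polarity obvious: CRYPTO_ALG_FIPS_INTERNAL set means the algorithm is not directly usable in FIPS mode, so the fips line prints "no".

/* Userspace equivalents of the <linux/string_choices.h> helpers. */
static inline const char *str_yes_no(int v) { return v ? "yes" : "no"; }
static inline const char *str_no_yes(int v) { return v ? "no" : "yes"; }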
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index c5fe4034b153..9860b60c9be4 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -9,18 +9,14 @@
* Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
+#include <linux/string.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
- __le32 buffer[16];
};
#define K1 RMD_K1
@@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc)
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
return 0;
}
static int rmd160_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ int remain = len - round_down(len, RMD160_BLOCK_SIZE);
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
+ __le32 buffer[RMD160_BLOCK_SIZE / 4];
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
+ rctx->byte_count += len - remain;
- rmd160_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd160_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
+ do {
+ memcpy(buffer, data, sizeof(buffer));
+ rmd160_transform(rctx->state, buffer);
+ data += sizeof(buffer);
+ len -= sizeof(buffer);
+ } while (len >= sizeof(buffer));
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
+ memzero_explicit(buffer, sizeof(buffer));
+ return remain;
}
/* Add padding and return the message digest. */
-static int rmd160_final(struct shash_desc *desc, u8 *out)
+static int rmd160_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
+ unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1;
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
+ union {
+ __le64 l64[RMD160_BLOCK_SIZE / 4];
+ __le32 l32[RMD160_BLOCK_SIZE / 2];
+ u8 u8[RMD160_BLOCK_SIZE * 2];
+ } block = {};
__le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd160_update(desc, padding, padlen);
+ u32 i;
- /* Append length */
- rmd160_update(desc, (const u8 *)&bits, sizeof(bits));
+ rctx->byte_count += len;
+ if (len >= bit_offset * 8)
+ bit_offset += RMD160_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3);
+
+ rmd160_transform(rctx->state, block.l32);
+ if (bit_offset > RMD160_BLOCK_SIZE / 8)
+ rmd160_transform(rctx->state,
+ block.l32 + RMD160_BLOCK_SIZE / 4);
+ memzero_explicit(&block, sizeof(block));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
return 0;
}
@@ -338,11 +321,12 @@ static struct shash_alg alg = {
.digestsize = RMD160_DIGEST_SIZE,
.init = rmd160_init,
.update = rmd160_update,
- .final = rmd160_final,
+ .finup = rmd160_finup,
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(rmd160_mod_init);
+module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
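The new rmd160_finup() inlines Merkle-Damgard termination: append 0x80, zero-pad, write the 64-bit little-endian bit count at the end of the block, and compress a second block when the tail leaves no room for the count. A sketch of just the padding arithmetic, assuming the caller then runs the compression function over the returned number of blocks:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BS 64 /* RMD160_BLOCK_SIZE */

/* Returns how many BS-byte blocks of `block` the caller must compress.
 * taillen is the length of the final partial block (taillen <= BS). */
static size_t md_pad_le(uint8_t block[2 * BS], const uint8_t *tail,
			size_t taillen, uint64_t total_bytes)
{
	/* If 0x80 plus the 8-byte count don't fit, spill into block 2. */
	size_t nblocks = (taillen >= BS - 8) ? 2 : 1;
	uint64_t bits = total_bytes << 3;
	size_t i;

	memset(block, 0, 2 * BS);
	memcpy(block, tail, taillen);
	block[taillen] = 0x80;
	for (i = 0; i < 8; i++)		/* little-endian, as RIPEMD uses */
		block[nblocks * BS - 8 + i] = (uint8_t)(bits >> (8 * i));
	return nblocks;
}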
diff --git a/crypto/rng.c b/crypto/rng.c
index fea082b25fe4..ee1768c5a400 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -8,17 +8,17 @@
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <linux/atomic.h>
#include <crypto/internal/rng.h>
+#include <linux/atomic.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
#include "internal.h"
@@ -30,7 +30,6 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
u8 *buf = NULL;
int err;
@@ -45,9 +44,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
seed = buf;
}
- crypto_stats_get(alg);
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
- crypto_stats_rng_seed(alg, err);
out:
kfree_sensitive(buf);
return err;
@@ -66,8 +63,8 @@ static unsigned int seedsize(struct crypto_alg *alg)
return ralg->seedsize;
}
-#ifdef CONFIG_NET
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_rng_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
@@ -79,12 +76,6 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
-#else
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -100,11 +91,14 @@ static const struct crypto_type crypto_rng_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_rng_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
.tfmsize = offsetof(struct crypto_rng, base),
+ .algsize = offsetof(struct rng_alg, base),
};
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
@@ -174,6 +168,11 @@ out:
EXPORT_SYMBOL_GPL(crypto_del_default_rng);
#endif
+static void rng_default_set_ent(struct crypto_rng *tfm, const u8 *data,
+ unsigned int len)
+{
+}
+
int crypto_register_rng(struct rng_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -185,6 +184,9 @@ int crypto_register_rng(struct rng_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
+ if (!alg->set_ent)
+ alg->set_ent = rng_default_set_ent;
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
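rng_default_set_ent() above is the usual "install a no-op default at registration" idiom: after crypto_register_rng() returns, callers may invoke ->set_ent() unconditionally instead of testing for NULL at every call site. The idiom in miniature, with hypothetical names:

#include <stddef.h>

struct rng_ops {
	void (*set_ent)(const unsigned char *data, unsigned int len);
};

static void set_ent_noop(const unsigned char *data, unsigned int len)
{
	(void)data; (void)len;
}

static void rng_ops_register(struct rng_ops *ops)
{
	if (!ops->set_ent)
		ops->set_ent = set_ent_noop;	/* never NULL afterwards */
}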
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 3285e3af43e1..50bdb18e7b48 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -16,78 +16,6 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
-/*
- * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
- */
-static const u8 rsa_digest_info_md5[] = {
- 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
- 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
- 0x05, 0x00, 0x04, 0x10
-};
-
-static const u8 rsa_digest_info_sha1[] = {
- 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
- 0x2b, 0x0e, 0x03, 0x02, 0x1a,
- 0x05, 0x00, 0x04, 0x14
-};
-
-static const u8 rsa_digest_info_rmd160[] = {
- 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
- 0x2b, 0x24, 0x03, 0x02, 0x01,
- 0x05, 0x00, 0x04, 0x14
-};
-
-static const u8 rsa_digest_info_sha224[] = {
- 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
- 0x05, 0x00, 0x04, 0x1c
-};
-
-static const u8 rsa_digest_info_sha256[] = {
- 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
- 0x05, 0x00, 0x04, 0x20
-};
-
-static const u8 rsa_digest_info_sha384[] = {
- 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
- 0x05, 0x00, 0x04, 0x30
-};
-
-static const u8 rsa_digest_info_sha512[] = {
- 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
- 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
- 0x05, 0x00, 0x04, 0x40
-};
-
-static const struct rsa_asn1_template {
- const char *name;
- const u8 *data;
- size_t size;
-} rsa_asn1_templates[] = {
-#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
- _(md5),
- _(sha1),
- _(rmd160),
- _(sha256),
- _(sha384),
- _(sha512),
- _(sha224),
- { NULL }
-#undef _
-};
-
-static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
-{
- const struct rsa_asn1_template *p;
-
- for (p = rsa_asn1_templates; p->name; p++)
- if (strcmp(name, p->name) == 0)
- return p;
- return NULL;
-}
-
struct pkcs1pad_ctx {
struct crypto_akcipher *child;
unsigned int key_size;
@@ -95,7 +23,6 @@ struct pkcs1pad_ctx {
struct pkcs1pad_inst_ctx {
struct crypto_akcipher_spawn spawn;
- const struct rsa_asn1_template *digest_info;
};
struct pkcs1pad_request {
@@ -108,42 +35,16 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- int err;
- ctx->key_size = 0;
-
- err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
- if (err)
- return err;
-
- /* Find out new modulus size from rsa implementation */
- err = crypto_akcipher_maxsize(ctx->child);
- if (err > PAGE_SIZE)
- return -ENOTSUPP;
-
- ctx->key_size = err;
- return 0;
+ return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}
static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- int err;
-
- ctx->key_size = 0;
-
- err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
- if (err)
- return err;
- /* Find out new modulus size from rsa implementation */
- err = crypto_akcipher_maxsize(ctx->child);
- if (err > PAGE_SIZE)
- return -ENOTSUPP;
-
- ctx->key_size = err;
- return 0;
+ return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen);
}
static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
@@ -151,9 +52,9 @@ static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
/*
- * The maximum destination buffer size for the encrypt/sign operations
+ * The maximum destination buffer size for the encrypt operation
* will be the same as for RSA, even though it's smaller for
- * decrypt/verify.
+ * decrypt.
*/
return ctx->key_size;
@@ -171,7 +72,7 @@ static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
sg_chain(sg, nsegs, next);
}
-static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+static int pkcs1pad_encrypt_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
@@ -190,7 +91,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
if (likely(!pad_len))
goto out;
- out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
+ out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
err = -ENOMEM;
if (!out_buf)
goto out;
@@ -210,20 +111,17 @@ out:
return err;
}
-static void pkcs1pad_encrypt_sign_complete_cb(
- struct crypto_async_request *child_async_req, int err)
+static void pkcs1pad_encrypt_complete_cb(void *data, int err)
{
- struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
+ struct akcipher_request *req = data;
if (err == -EINPROGRESS)
- return;
+ goto out;
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req,
- pkcs1pad_encrypt_sign_complete(req, err));
+ err = pkcs1pad_encrypt_complete(req, err);
+
+out:
+ akcipher_request_complete(req, err);
}
static int pkcs1pad_encrypt(struct akcipher_request *req)
@@ -253,7 +151,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
ps_end = ctx->key_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x02;
for (i = 1; i < ps_end; i++)
- req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+ req_ctx->in_buf[i] = get_random_u32_inclusive(1, 255);
req_ctx->in_buf[ps_end] = 0x00;
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
@@ -261,7 +159,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
- pkcs1pad_encrypt_sign_complete_cb, req);
+ pkcs1pad_encrypt_complete_cb, req);
/* Reuse output buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
@@ -269,7 +167,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
- return pkcs1pad_encrypt_sign_complete(req, err);
+ return pkcs1pad_encrypt_complete(req, err);
return err;
}
@@ -328,19 +226,17 @@ done:
return err;
}
-static void pkcs1pad_decrypt_complete_cb(
- struct crypto_async_request *child_async_req, int err)
+static void pkcs1pad_decrypt_complete_cb(void *data, int err)
{
- struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
+ struct akcipher_request *req = data;
if (err == -EINPROGRESS)
- return;
+ goto out;
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+ err = pkcs1pad_decrypt_complete(req, err);
+
+out:
+ akcipher_request_complete(req, err);
}
static int pkcs1pad_decrypt(struct akcipher_request *req)
@@ -376,197 +272,6 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
return err;
}
-static int pkcs1pad_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
- struct akcipher_instance *inst = akcipher_alg_instance(tfm);
- struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
- const struct rsa_asn1_template *digest_info = ictx->digest_info;
- int err;
- unsigned int ps_end, digest_info_size = 0;
-
- if (!ctx->key_size)
- return -EINVAL;
-
- if (digest_info)
- digest_info_size = digest_info->size;
-
- if (req->src_len + digest_info_size > ctx->key_size - 11)
- return -EOVERFLOW;
-
- if (req->dst_len < ctx->key_size) {
- req->dst_len = ctx->key_size;
- return -EOVERFLOW;
- }
-
- req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
- GFP_KERNEL);
- if (!req_ctx->in_buf)
- return -ENOMEM;
-
- ps_end = ctx->key_size - digest_info_size - req->src_len - 2;
- req_ctx->in_buf[0] = 0x01;
- memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
- req_ctx->in_buf[ps_end] = 0x00;
-
- if (digest_info)
- memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
- digest_info->size);
-
- pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
- ctx->key_size - 1 - req->src_len, req->src);
-
- akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
- akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
- pkcs1pad_encrypt_sign_complete_cb, req);
-
- /* Reuse output buffer */
- akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
- req->dst, ctx->key_size - 1, req->dst_len);
-
- err = crypto_akcipher_decrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS && err != -EBUSY)
- return pkcs1pad_encrypt_sign_complete(req, err);
-
- return err;
-}
-
-static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
- struct akcipher_instance *inst = akcipher_alg_instance(tfm);
- struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
- const struct rsa_asn1_template *digest_info = ictx->digest_info;
- const unsigned int sig_size = req->src_len;
- const unsigned int digest_size = req->dst_len;
- unsigned int dst_len;
- unsigned int pos;
- u8 *out_buf;
-
- if (err)
- goto done;
-
- err = -EINVAL;
- dst_len = req_ctx->child_req.dst_len;
- if (dst_len < ctx->key_size - 1)
- goto done;
-
- out_buf = req_ctx->out_buf;
- if (dst_len == ctx->key_size) {
- if (out_buf[0] != 0x00)
- /* Decrypted value had no leading 0 byte */
- goto done;
-
- dst_len--;
- out_buf++;
- }
-
- err = -EBADMSG;
- if (out_buf[0] != 0x01)
- goto done;
-
- for (pos = 1; pos < dst_len; pos++)
- if (out_buf[pos] != 0xff)
- break;
-
- if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
- goto done;
- pos++;
-
- if (digest_info) {
- if (digest_info->size > dst_len - pos)
- goto done;
- if (crypto_memneq(out_buf + pos, digest_info->data,
- digest_info->size))
- goto done;
-
- pos += digest_info->size;
- }
-
- err = 0;
-
- if (digest_size != dst_len - pos) {
- err = -EKEYREJECTED;
- req->dst_len = dst_len - pos;
- goto done;
- }
- /* Extract appended digest. */
- sg_pcopy_to_buffer(req->src,
- sg_nents_for_len(req->src, sig_size + digest_size),
- req_ctx->out_buf + ctx->key_size,
- digest_size, sig_size);
- /* Do the actual verification step. */
- if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
- digest_size) != 0)
- err = -EKEYREJECTED;
-done:
- kfree_sensitive(req_ctx->out_buf);
-
- return err;
-}
-
-static void pkcs1pad_verify_complete_cb(
- struct crypto_async_request *child_async_req, int err)
-{
- struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
-
- if (err == -EINPROGRESS)
- return;
-
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
-}
-
-/*
- * The verify operation is here for completeness similar to the verification
- * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
- * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to
- * retrieve the DigestInfo from a signature, instead the user is expected
- * to call the sign operation to generate the expected signature and compare
- * signatures instead of the message-digests.
- */
-static int pkcs1pad_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
- const unsigned int sig_size = req->src_len;
- const unsigned int digest_size = req->dst_len;
- int err;
-
- if (WARN_ON(req->dst) || WARN_ON(!digest_size) ||
- !ctx->key_size || sig_size != ctx->key_size)
- return -EINVAL;
-
- req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL);
- if (!req_ctx->out_buf)
- return -ENOMEM;
-
- pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size, NULL);
-
- akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
- akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
- pkcs1pad_verify_complete_cb, req);
-
- /* Reuse input buffer, output to a new buffer */
- akcipher_request_set_crypt(&req_ctx->child_req, req->src,
- req_ctx->out_sg, sig_size, ctx->key_size);
-
- err = crypto_akcipher_encrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS && err != -EBUSY)
- return pkcs1pad_verify_complete(req, err);
-
- return err;
-}
-
static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
@@ -579,6 +284,10 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
+
+ akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) +
+ crypto_akcipher_reqsize(child_tfm));
+
return 0;
}
@@ -604,7 +313,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
struct akcipher_alg *rsa_alg;
- const char *hash_name;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
@@ -630,36 +338,15 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
}
err = -ENAMETOOLONG;
- hash_name = crypto_attr_alg_name(tb[2]);
- if (IS_ERR(hash_name)) {
- if (snprintf(inst->alg.base.cra_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
- rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_free_inst;
-
- if (snprintf(inst->alg.base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
- rsa_alg->base.cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_free_inst;
- } else {
- ctx->digest_info = rsa_lookup_asn1(hash_name);
- if (!ctx->digest_info) {
- err = -EINVAL;
- goto err_free_inst;
- }
-
- if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
- hash_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_free_inst;
-
- if (snprintf(inst->alg.base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
- rsa_alg->base.cra_driver_name,
- hash_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_free_inst;
- }
+ if (snprintf(inst->alg.base.cra_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
@@ -669,12 +356,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = pkcs1pad_encrypt;
inst->alg.decrypt = pkcs1pad_decrypt;
- inst->alg.sign = pkcs1pad_sign;
- inst->alg.verify = pkcs1pad_verify;
inst->alg.set_pub_key = pkcs1pad_set_pub_key;
inst->alg.set_priv_key = pkcs1pad_set_priv_key;
inst->alg.max_size = pkcs1pad_get_max_size;
- inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
inst->free = pkcs1pad_free;
@@ -691,3 +375,5 @@ struct crypto_template rsa_pkcs1pad_tmpl = {
.create = pkcs1pad_create,
.module = THIS_MODULE,
};
+
+MODULE_ALIAS_CRYPTO("pkcs1pad");
diff --git a/crypto/rsa.c b/crypto/rsa.c
index c50f2d2a4d06..6c7734083c98 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -24,14 +24,38 @@ struct rsa_mpi_key {
MPI qinv;
};
+static int rsa_check_payload(MPI x, MPI n)
+{
+ MPI n1;
+
+ if (mpi_cmp_ui(x, 1) <= 0)
+ return -EINVAL;
+
+ n1 = mpi_alloc(0);
+ if (!n1)
+ return -ENOMEM;
+
+ if (mpi_sub_ui(n1, n, 1) || mpi_cmp(x, n1) >= 0) {
+ mpi_free(n1);
+ return -EINVAL;
+ }
+
+ mpi_free(n1);
+ return 0;
+}
+
/*
* RSAEP function [RFC3447 sec 5.1.1]
* c = m^e mod n;
*/
static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
{
- /* (1) Validate 0 <= m < n */
- if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
+ /*
+ * Even though (1) in RFC3447 only requires 0 <= m <= n - 1, we are
+ * slightly more conservative and require 1 < m < n - 1. This is in line
+ * with SP 800-56Br2, Section 7.1.1.
+ */
+ if (rsa_check_payload(m, key->n))
return -EINVAL;
/* (2) c = m^e mod n */
@@ -50,8 +74,12 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
MPI m2, m12_or_qh;
int ret = -ENOMEM;
- /* (1) Validate 0 <= c < n */
- if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
+ /*
+ * Even though (1) in RFC3447 only requires 0 <= c <= n - 1, we are
+ * slightly more conservative and require 1 < c < n - 1. This is in line
+ * with SP 800-56Br2, Section 7.1.2.
+ */
+ if (rsa_check_payload(c, key->n))
return -EINVAL;
m2 = mpi_alloc(0);
@@ -70,14 +98,13 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
goto err_free_mpi;
/* (2iii) h = (m_1 - m_2) * qInv mod p */
- mpi_sub(m12_or_qh, m_or_m1_or_h, m2);
- mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
+ ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?:
+ mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
/* (2iv) m = m_2 + q * h */
- mpi_mul(m12_or_qh, key->q, m_or_m1_or_h);
- mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
-
- ret = 0;
+ ret = ret ?:
+ mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?:
+ mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
err_free_mpi:
mpi_free(m12_or_qh);
@@ -205,6 +232,40 @@ static int rsa_check_key_length(unsigned int len)
return -EINVAL;
}
+static int rsa_check_exponent_fips(MPI e)
+{
+ MPI e_max = NULL;
+ int err;
+
+ /* check if odd */
+ if (!mpi_test_bit(e, 0)) {
+ return -EINVAL;
+ }
+
+ /* check if 2^16 < e < 2^256. */
+ if (mpi_cmp_ui(e, 65536) <= 0) {
+ return -EINVAL;
+ }
+
+ e_max = mpi_alloc(0);
+ if (!e_max)
+ return -ENOMEM;
+
+ err = mpi_set_bit(e_max, 256);
+ if (err) {
+ mpi_free(e_max);
+ return err;
+ }
+
+ if (mpi_cmp(e, e_max) >= 0) {
+ mpi_free(e_max);
+ return -EINVAL;
+ }
+
+ mpi_free(e_max);
+ return 0;
+}
+
static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
@@ -232,6 +293,11 @@ static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return -EINVAL;
}
+ if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
+ rsa_free_mpi_key(mpi_key);
+ return -EINVAL;
+ }
+
return 0;
err:
@@ -290,6 +356,11 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return -EINVAL;
}
+ if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
+ rsa_free_mpi_key(mpi_key);
+ return -EINVAL;
+ }
+
return 0;
err:
@@ -336,21 +407,30 @@ static int __init rsa_init(void)
return err;
err = crypto_register_template(&rsa_pkcs1pad_tmpl);
- if (err) {
- crypto_unregister_akcipher(&rsa);
- return err;
- }
+ if (err)
+ goto err_unregister_rsa;
+
+ err = crypto_register_template(&rsassa_pkcs1_tmpl);
+ if (err)
+ goto err_unregister_rsa_pkcs1pad;
return 0;
+
+err_unregister_rsa_pkcs1pad:
+ crypto_unregister_template(&rsa_pkcs1pad_tmpl);
+err_unregister_rsa:
+ crypto_unregister_akcipher(&rsa);
+ return err;
}
static void __exit rsa_exit(void)
{
+ crypto_unregister_template(&rsassa_pkcs1_tmpl);
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}
-subsys_initcall(rsa_init);
+module_init(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
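rsa_check_payload() tightens the RFC 3447 bound to 1 < x < n - 1 because 0, 1 and n - 1 are fixed points of modular exponentiation with an odd exponent (for instance (n-1)^e = n-1 mod n), so they leak the input through the "encryption". The same check on plain machine integers instead of MPIs:

#include <stdint.h>

/* 0 and 1 map to themselves under x^e mod n, and n - 1 maps to itself
 * for any odd e, so all three are rejected along with anything >= n. */
static int check_payload(uint64_t x, uint64_t n)
{
	if (x <= 1 || x >= n - 1)
		return -1;
	return 0;
}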
diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1
index 4ce06758e8af..76865124a9c7 100644
--- a/crypto/rsaprivkey.asn1
+++ b/crypto/rsaprivkey.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2016 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.2
+
RsaPrivKey ::= SEQUENCE {
version INTEGER,
n INTEGER ({ rsa_get_n }),
diff --git a/crypto/rsapubkey.asn1 b/crypto/rsapubkey.asn1
index 725498e461d2..0d32b1ca6270 100644
--- a/crypto/rsapubkey.asn1
+++ b/crypto/rsapubkey.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2016 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.1
+
RsaPubKey ::= SEQUENCE {
n INTEGER ({ rsa_get_n }),
e INTEGER ({ rsa_get_e })
diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c
new file mode 100644
index 000000000000..94fa5e9600e7
--- /dev/null
+++ b/crypto/rsassa-pkcs1.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RSA Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2)
+ *
+ * https://www.rfc-editor.org/rfc/rfc8017#section-8.2
+ *
+ * Copyright (c) 2015 - 2024 Intel Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sig.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/sig.h>
+
+/*
+ * Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24)
+ *
+ * RSA keys are usually much larger than the hash of the message to be signed.
+ * The hash is therefore prepended by the Full Hash Prefix and a 0xff padding.
+ * The Full Hash Prefix is an ASN.1 SEQUENCE containing the hash algorithm OID.
+ *
+ * https://www.rfc-editor.org/rfc/rfc9580#table-24
+ */
+
+static const u8 hash_prefix_none[] = { };
+
+static const u8 hash_prefix_md5[] = {
+ 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, /* SEQUENCE (SEQUENCE (OID */
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* <algorithm>, */
+ 0x05, 0x00, 0x04, 0x10 /* NULL), OCTET STRING <hash>) */
+};
+
+static const u8 hash_prefix_sha1[] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+ 0x2b, 0x0e, 0x03, 0x02, 0x1a,
+ 0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 hash_prefix_rmd160[] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+ 0x2b, 0x24, 0x03, 0x02, 0x01,
+ 0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 hash_prefix_sha224[] = {
+ 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
+ 0x05, 0x00, 0x04, 0x1c
+};
+
+static const u8 hash_prefix_sha256[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
+ 0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 hash_prefix_sha384[] = {
+ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
+ 0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 hash_prefix_sha512[] = {
+ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
+ 0x05, 0x00, 0x04, 0x40
+};
+
+static const u8 hash_prefix_sha3_256[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08,
+ 0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 hash_prefix_sha3_384[] = {
+ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09,
+ 0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 hash_prefix_sha3_512[] = {
+ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0a,
+ 0x05, 0x00, 0x04, 0x40
+};
+
+static const struct hash_prefix {
+ const char *name;
+ const u8 *data;
+ size_t size;
+} hash_prefixes[] = {
+#define _(X) { #X, hash_prefix_##X, sizeof(hash_prefix_##X) }
+ _(none),
+ _(md5),
+ _(sha1),
+ _(rmd160),
+ _(sha256),
+ _(sha384),
+ _(sha512),
+ _(sha224),
+#undef _
+#define _(X) { "sha3-" #X, hash_prefix_sha3_##X, sizeof(hash_prefix_sha3_##X) }
+ _(256),
+ _(384),
+ _(512),
+#undef _
+ { NULL }
+};
+
+static const struct hash_prefix *rsassa_pkcs1_find_hash_prefix(const char *name)
+{
+ const struct hash_prefix *p;
+
+ for (p = hash_prefixes; p->name; p++)
+ if (strcmp(name, p->name) == 0)
+ return p;
+ return NULL;
+}
+
+static bool rsassa_pkcs1_invalid_hash_len(unsigned int len,
+ const struct hash_prefix *p)
+{
+ /*
+ * Legacy protocols such as TLS 1.1 or earlier and IKE version 1
+ * do not prepend a Full Hash Prefix to the hash. In that case,
+ * the size of the Full Hash Prefix is zero.
+ */
+ if (p->data == hash_prefix_none)
+ return false;
+
+ /*
+ * The final byte of the Full Hash Prefix encodes the hash length.
+ *
+ * This needs to be revisited should hash algorithms with more than
+ * 1016 bits (127 bytes * 8) ever be added. The length would then
+ * be encoded into more than one byte by ASN.1.
+ */
+ static_assert(HASH_MAX_DIGESTSIZE <= 127);
+
+ return len != p->data[p->size - 1];
+}
+
+struct rsassa_pkcs1_ctx {
+ struct crypto_akcipher *child;
+ unsigned int key_size;
+};
+
+struct rsassa_pkcs1_inst_ctx {
+ struct crypto_akcipher_spawn spawn;
+ const struct hash_prefix *hash_prefix;
+};
+
+static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct sig_instance *inst = sig_alg_instance(tfm);
+ struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
+ const struct hash_prefix *hash_prefix = ictx->hash_prefix;
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+ unsigned int pad_len;
+ unsigned int ps_end;
+ unsigned int len;
+ u8 *in_buf;
+ int err;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (dlen < ctx->key_size)
+ return -EOVERFLOW;
+
+ if (rsassa_pkcs1_invalid_hash_len(slen, hash_prefix))
+ return -EINVAL;
+
+ if (slen + hash_prefix->size > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ pad_len = ctx->key_size - slen - hash_prefix->size - 1;
+
+ /* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */
+ in_buf = dst;
+ memmove(in_buf + pad_len + hash_prefix->size, src, slen);
+ memcpy(in_buf + pad_len, hash_prefix->data, hash_prefix->size);
+
+ ps_end = pad_len - 1;
+ in_buf[0] = 0x01;
+ memset(in_buf + 1, 0xff, ps_end - 1);
+ in_buf[ps_end] = 0x00;
+
+ /* RFC 8017 sec 8.2.1 step 2 - RSA signature */
+ err = crypto_akcipher_sync_decrypt(ctx->child, in_buf,
+ ctx->key_size - 1, in_buf,
+ ctx->key_size);
+ if (err < 0)
+ return err;
+
+ len = err;
+ pad_len = ctx->key_size - len;
+
+ /* Four billion to one */
+ if (unlikely(pad_len)) {
+ memmove(dst + pad_len, dst, len);
+ memset(dst, 0, pad_len);
+ }
+
+ return ctx->key_size;
+}
+
+static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct sig_instance *inst = sig_alg_instance(tfm);
+ struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
+ const struct hash_prefix *hash_prefix = ictx->hash_prefix;
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+ unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
+ struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
+ struct crypto_wait cwait;
+ struct scatterlist sg;
+ unsigned int dst_len;
+ unsigned int pos;
+ u8 *out_buf;
+ int err;
+
+ /* RFC 8017 sec 8.2.2 step 1 - length checking */
+ if (!ctx->key_size ||
+ slen != ctx->key_size ||
+ rsassa_pkcs1_invalid_hash_len(dlen, hash_prefix))
+ return -EINVAL;
+
+ /* RFC 8017 sec 8.2.2 step 2 - RSA verification */
+ child_req = kmalloc(sizeof(*child_req) + child_reqsize + ctx->key_size,
+ GFP_KERNEL);
+ if (!child_req)
+ return -ENOMEM;
+
+ out_buf = (u8 *)(child_req + 1) + child_reqsize;
+ memcpy(out_buf, src, slen);
+
+ crypto_init_wait(&cwait);
+ sg_init_one(&sg, out_buf, slen);
+ akcipher_request_set_tfm(child_req, ctx->child);
+ akcipher_request_set_crypt(child_req, &sg, &sg, slen, slen);
+ akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &cwait);
+
+ err = crypto_akcipher_encrypt(child_req);
+ err = crypto_wait_req(err, &cwait);
+ if (err)
+ return err;
+
+ /* RFC 8017 sec 8.2.2 step 3 - EMSA-PKCS1-v1_5 encoding verification */
+ dst_len = child_req->dst_len;
+ if (dst_len < ctx->key_size - 1)
+ return -EINVAL;
+
+ if (dst_len == ctx->key_size) {
+ if (out_buf[0] != 0x00)
+ /* Encrypted value had no leading 0 byte */
+ return -EINVAL;
+
+ dst_len--;
+ out_buf++;
+ }
+
+ if (out_buf[0] != 0x01)
+ return -EBADMSG;
+
+ for (pos = 1; pos < dst_len; pos++)
+ if (out_buf[pos] != 0xff)
+ break;
+
+ if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
+ return -EBADMSG;
+ pos++;
+
+ if (hash_prefix->size > dst_len - pos)
+ return -EBADMSG;
+ if (crypto_memneq(out_buf + pos, hash_prefix->data, hash_prefix->size))
+ return -EBADMSG;
+ pos += hash_prefix->size;
+
+ /* RFC 8017 sec 8.2.2 step 4 - comparison of digest with out_buf */
+ if (dlen != dst_len - pos)
+ return -EKEYREJECTED;
+ if (memcmp(digest, out_buf + pos, dlen) != 0)
+ return -EKEYREJECTED;
+
+ return 0;
+}
+
+static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm)
+{
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return ctx->key_size * BITS_PER_BYTE;
+}
+
+static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
+}
+
+static int rsassa_pkcs1_set_priv_key(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+
+ return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen);
+}
+
+static int rsassa_pkcs1_init_tfm(struct crypto_sig *tfm)
+{
+ struct sig_instance *inst = sig_alg_instance(tfm);
+ struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+ struct crypto_akcipher *child_tfm;
+
+ child_tfm = crypto_spawn_akcipher(&ictx->spawn);
+ if (IS_ERR(child_tfm))
+ return PTR_ERR(child_tfm);
+
+ ctx->child = child_tfm;
+
+ return 0;
+}
+
+static void rsassa_pkcs1_exit_tfm(struct crypto_sig *tfm)
+{
+ struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
+
+ crypto_free_akcipher(ctx->child);
+}
+
+static void rsassa_pkcs1_free(struct sig_instance *inst)
+{
+ struct rsassa_pkcs1_inst_ctx *ctx = sig_instance_ctx(inst);
+ struct crypto_akcipher_spawn *spawn = &ctx->spawn;
+
+ crypto_drop_akcipher(spawn);
+ kfree(inst);
+}
+
+static int rsassa_pkcs1_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct rsassa_pkcs1_inst_ctx *ctx;
+ struct akcipher_alg *rsa_alg;
+ struct sig_instance *inst;
+ const char *hash_name;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = sig_instance_ctx(inst);
+
+ err = crypto_grab_akcipher(&ctx->spawn, sig_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+
+ rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
+
+ if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
+ err = -EINVAL;
+ goto err_free_inst;
+ }
+
+ hash_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(hash_name)) {
+ err = PTR_ERR(hash_name);
+ goto err_free_inst;
+ }
+
+ ctx->hash_prefix = rsassa_pkcs1_find_hash_prefix(hash_name);
+ if (!ctx->hash_prefix) {
+ err = -EINVAL;
+ goto err_free_inst;
+ }
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1(%s,%s)", rsa_alg->base.cra_name,
+ hash_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1(%s,%s)", rsa_alg->base.cra_driver_name,
+ hash_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
+ inst->alg.base.cra_ctxsize = sizeof(struct rsassa_pkcs1_ctx);
+
+ inst->alg.init = rsassa_pkcs1_init_tfm;
+ inst->alg.exit = rsassa_pkcs1_exit_tfm;
+
+ inst->alg.sign = rsassa_pkcs1_sign;
+ inst->alg.verify = rsassa_pkcs1_verify;
+ inst->alg.key_size = rsassa_pkcs1_key_size;
+ inst->alg.set_pub_key = rsassa_pkcs1_set_pub_key;
+ inst->alg.set_priv_key = rsassa_pkcs1_set_priv_key;
+
+ inst->free = rsassa_pkcs1_free;
+
+ err = sig_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ rsassa_pkcs1_free(inst);
+ }
+ return err;
+}
+
+struct crypto_template rsassa_pkcs1_tmpl = {
+ .name = "pkcs1",
+ .create = rsassa_pkcs1_create,
+ .module = THIS_MODULE,
+};
+
+MODULE_ALIAS_CRYPTO("pkcs1");
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 16f6ba896fb6..be0e24843806 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -15,59 +15,170 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
-static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
+void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
- void *src = out ? buf : sgdata;
- void *dst = out ? sgdata : buf;
+ struct scatterlist *sg = walk->sg;
- memcpy(dst, src, nbytes);
+ nbytes += walk->offset - sg->offset;
+
+ while (nbytes > sg->length) {
+ nbytes -= sg->length;
+ sg = sg_next(sg);
+ }
+ walk->sg = sg;
+ walk->offset = sg->offset + nbytes;
}
+EXPORT_SYMBOL_GPL(scatterwalk_skip);
-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out)
+inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
+ unsigned int nbytes)
{
- for (;;) {
- unsigned int len_this_page = scatterwalk_pagelen(walk);
- u8 *vaddr;
+ do {
+ unsigned int to_copy;
+
+ to_copy = scatterwalk_next(walk, nbytes);
+ memcpy(buf, walk->addr, to_copy);
+ scatterwalk_done_src(walk, to_copy);
+ buf += to_copy;
+ nbytes -= to_copy;
+ } while (nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk);
- if (len_this_page > nbytes)
- len_this_page = nbytes;
+inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
+ unsigned int nbytes)
+{
+ do {
+ unsigned int to_copy;
+
+ to_copy = scatterwalk_next(walk, nbytes);
+ memcpy(walk->addr, buf, to_copy);
+ scatterwalk_done_dst(walk, to_copy);
+ buf += to_copy;
+ nbytes -= to_copy;
+ } while (nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk);
- if (out != 2) {
- vaddr = scatterwalk_map(walk);
- memcpy_dir(buf, vaddr, len_this_page, out);
- scatterwalk_unmap(vaddr);
- }
+void memcpy_from_sglist(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes)
+{
+ struct scatter_walk walk;
- scatterwalk_advance(walk, len_this_page);
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ return;
- if (nbytes == len_this_page)
- break;
+ scatterwalk_start_at_pos(&walk, sg, start);
+ memcpy_from_scatterwalk(buf, &walk, nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_from_sglist);
- buf += len_this_page;
- nbytes -= len_this_page;
+void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ const void *buf, unsigned int nbytes)
+{
+ struct scatter_walk walk;
- scatterwalk_pagedone(walk, out & 1, 1);
- }
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ return;
+
+ scatterwalk_start_at_pos(&walk, sg, start);
+ memcpy_to_scatterwalk(&walk, buf, nbytes);
}
-EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
+EXPORT_SYMBOL_GPL(memcpy_to_sglist);
-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out)
+/**
+ * memcpy_sglist() - Copy data from one scatterlist to another
+ * @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
+ * @src: The source scatterlist. Can be NULL if @nbytes == 0.
+ * @nbytes: Number of bytes to copy
+ *
+ * The scatterlists can describe exactly the same memory, in which case this
+ * function is a no-op. No other overlaps are supported.
+ *
+ * Context: Any context
+ */
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct scatter_walk walk;
- struct scatterlist tmp[2];
+ unsigned int src_offset, dst_offset;
- if (!nbytes)
+ if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
return;
- sg = scatterwalk_ffwd(tmp, sg, start);
+ src_offset = src->offset;
+ dst_offset = dst->offset;
+ for (;;) {
+ /* Compute the length to copy this step. */
+ unsigned int len = min3(src->offset + src->length - src_offset,
+ dst->offset + dst->length - dst_offset,
+ nbytes);
+ struct page *src_page = sg_page(src);
+ struct page *dst_page = sg_page(dst);
+ const void *src_virt;
+ void *dst_virt;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ /* HIGHMEM: we may have to actually map the pages. */
+ const unsigned int src_oip = offset_in_page(src_offset);
+ const unsigned int dst_oip = offset_in_page(dst_offset);
+ const unsigned int limit = PAGE_SIZE;
+
+ /* Further limit len to not cross a page boundary. */
+ len = min3(len, limit - src_oip, limit - dst_oip);
+
+ /* Compute the source and destination pages. */
+ src_page += src_offset / PAGE_SIZE;
+ dst_page += dst_offset / PAGE_SIZE;
+
+ if (src_page != dst_page) {
+ /* Copy between different pages. */
+ memcpy_page(dst_page, dst_oip,
+ src_page, src_oip, len);
+ flush_dcache_page(dst_page);
+ } else if (src_oip != dst_oip) {
+ /* Copy between different parts of same page. */
+ dst_virt = kmap_local_page(dst_page);
+ memcpy(dst_virt + dst_oip, dst_virt + src_oip,
+ len);
+ kunmap_local(dst_virt);
+ flush_dcache_page(dst_page);
+ } /* Else, it's the same memory. No action needed. */
+ } else {
+ /*
+ * !HIGHMEM: no mapping needed. Just work in the linear
+ * buffer of each sg entry. Note that we can cross page
+ * boundaries, as they are not significant in this case.
+ */
+ src_virt = page_address(src_page) + src_offset;
+ dst_virt = page_address(dst_page) + dst_offset;
+ if (src_virt != dst_virt) {
+ memcpy(dst_virt, src_virt, len);
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(
+ dst_page, dst_offset, len);
+ } /* Else, it's the same memory. No action needed. */
+ }
+ nbytes -= len;
+ if (nbytes == 0) /* No more to copy? */
+ break;
- scatterwalk_start(&walk, sg);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
+ /*
+ * There's more to copy. Advance the offsets by the length
+ * copied this step, and advance the sg entries as needed.
+ */
+ src_offset += len;
+ if (src_offset >= src->offset + src->length) {
+ src = sg_next(src);
+ src_offset = src->offset;
+ }
+ dst_offset += len;
+ if (dst_offset >= dst->offset + dst->length) {
+ dst = sg_next(dst);
+ dst_offset = dst->offset;
+ }
+ }
}
-EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+EXPORT_SYMBOL_GPL(memcpy_sglist);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
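To make the new copy helpers' direction conventions concrete, a caller-side sketch (the buffer and offsets are placeholders, not from this patch): the old scatterwalk_map_and_copy() out-flag splits into two direction-explicit functions.

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

static void copy_demo(struct scatterlist *sg, unsigned int start,
		      u8 *buf, unsigned int nbytes)
{
	/* was: scatterwalk_map_and_copy(buf, sg, start, nbytes, 0); */
	memcpy_from_sglist(buf, sg, start, nbytes);

	/* was: scatterwalk_map_and_copy(buf, sg, start, nbytes, 1); */
	memcpy_to_sglist(sg, start, buf, nbytes);
}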
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 738f4f8f0f41..1a7ed8ae65b0 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -6,28 +6,31 @@
* Copyright (c) 2016, Intel Corporation
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <linux/errno.h>
+
+#include <crypto/internal/scompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cpumask.h>
+#include <linux/cryptouser.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
-#include <linux/vmalloc.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
struct scomp_scratch {
spinlock_t lock;
- void *src;
- void *dst;
+ union {
+ void *src;
+ unsigned long saddr;
+ };
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
@@ -38,8 +41,12 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
-#ifdef CONFIG_NET
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static cpumask_t scomp_scratch_want;
+static void scomp_scratch_workfn(struct work_struct *work);
+static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);
+
+static int __maybe_unused crypto_scomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rscomp;
@@ -50,12 +57,6 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(rscomp), &rscomp);
}
-#else
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -73,91 +74,206 @@ static void crypto_scomp_free_scratches(void)
for_each_possible_cpu(i) {
scratch = per_cpu_ptr(&scomp_scratch, i);
- vfree(scratch->src);
- vfree(scratch->dst);
+ free_page(scratch->saddr);
scratch->src = NULL;
- scratch->dst = NULL;
}
}
-static int crypto_scomp_alloc_scratches(void)
+static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
- struct scomp_scratch *scratch;
- int i;
+ int node = cpu_to_node(cpu);
+ struct page *page;
+
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+ spin_lock_bh(&scratch->lock);
+ scratch->src = page_address(page);
+ spin_unlock_bh(&scratch->lock);
+ return 0;
+}
- for_each_possible_cpu(i) {
- void *mem;
+static void scomp_scratch_workfn(struct work_struct *work)
+{
+ int cpu;
- scratch = per_cpu_ptr(&scomp_scratch, i);
+ for_each_cpu(cpu, &scomp_scratch_want) {
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ if (scratch->src)
+ continue;
+ if (scomp_alloc_scratch(scratch, cpu))
+ break;
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->src = mem;
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->dst = mem;
+ cpumask_clear_cpu(cpu, &scomp_scratch_want);
}
- return 0;
-error:
- crypto_scomp_free_scratches();
- return -ENOMEM;
+}
+
+static int crypto_scomp_alloc_scratches(void)
+{
+ unsigned int i = cpumask_first(cpu_possible_mask);
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, i);
+ return scomp_alloc_scratch(scratch, i);
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
+ struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
int ret = 0;
mutex_lock(&scomp_lock);
- if (!scomp_scratch_users++)
+ ret = crypto_acomp_alloc_streams(&alg->streams);
+ if (ret)
+ goto unlock;
+ if (!scomp_scratch_users++) {
ret = crypto_scomp_alloc_scratches();
+ if (ret)
+ scomp_scratch_users--;
+ }
+unlock:
mutex_unlock(&scomp_lock);
return ret;
}
+static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
+{
+ int cpu = raw_smp_processor_id();
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ spin_lock(&scratch->lock);
+ if (likely(scratch->src))
+ return scratch;
+ spin_unlock(&scratch->lock);
+
+	/*
+	 * No scratch page on this CPU yet: ask the worker to allocate one,
+	 * and fall back for now to the first possible CPU's scratch, which
+	 * crypto_scomp_alloc_scratches() is guaranteed to have populated.
+	 */
+	cpumask_set_cpu(cpu, &scomp_scratch_want);
+	schedule_work(&scomp_scratch_work);
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
+ spin_lock(&scratch->lock);
+ return scratch;
+}
+
+static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
+ __releases(scratch)
+{
+ spin_unlock(&scratch->lock);
+}
+
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
+ bool src_isvirt = acomp_request_src_isvirt(req);
+ bool dst_isvirt = acomp_request_dst_isvirt(req);
struct crypto_scomp *scomp = *tfm_ctx;
- void **ctx = acomp_request_ctx(req);
+ struct crypto_acomp_stream *stream;
struct scomp_scratch *scratch;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+ struct page *spage, *dpage;
+ unsigned int n;
+ const u8 *src;
+ size_t soff;
+ size_t doff;
+ u8 *dst;
int ret;
- if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+ if (!req->src || !slen)
return -EINVAL;
- if (req->dst && !req->dlen)
+ if (!req->dst || !dlen)
return -EINVAL;
- if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
- req->dlen = SCOMP_SCRATCH_SIZE;
+ if (dst_isvirt)
+ dst = req->dvirt;
+ else {
+ if (dlen <= req->dst->length) {
+ dpage = sg_page(req->dst);
+ doff = req->dst->offset;
+ } else
+ return -ENOSYS;
+
+ dpage += doff / PAGE_SIZE;
+ doff = offset_in_page(doff);
+
+		/* n = index of the last page the output touches, relative
+		 * to dpage. */
+		n = (dlen - 1) / PAGE_SIZE;
+		n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+		/*
+		 * kmap_local_page() maps a single page, so give up if the
+		 * buffer ends in highmem and crosses a page boundary.
+		 * Checking the last page suffices: an sg segment is
+		 * physically contiguous, so highmem pages can only follow
+		 * lowmem ones.
+		 */
+		if (PageHighMem(dpage + n) &&
+		    size_add(doff, dlen) > PAGE_SIZE)
+			return -ENOSYS;
+ dst = kmap_local_page(dpage) + doff;
+ }
- scratch = raw_cpu_ptr(&scomp_scratch);
- spin_lock(&scratch->lock);
+ if (src_isvirt)
+ src = req->svirt;
+ else {
+ src = NULL;
+ do {
+ if (slen <= req->src->length) {
+ spage = sg_page(req->src);
+ soff = req->src->offset;
+ } else
+ break;
+
+ spage = spage + soff / PAGE_SIZE;
+ soff = offset_in_page(soff);
+
+			/* Same single-page mapping constraint as for the
+			 * destination above. */
+			n = (slen - 1) / PAGE_SIZE;
+			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
+ if (PageHighMem(spage + n) &&
+ size_add(soff, slen) > PAGE_SIZE)
+ break;
+ src = kmap_local_page(spage) + soff;
+ } while (0);
+ }
+
+ stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
- scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
- if (dir)
- ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
- scratch->dst, &req->dlen, *ctx);
+ if (!src_isvirt && !src) {
+ const u8 *src;
+
+ scratch = scomp_lock_scratch();
+ src = scratch->src;
+ memcpy_from_sglist(scratch->src, req->src, 0, slen);
+
+ if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ scomp_unlock_scratch(scratch);
+ } else if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
else
- ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
- scratch->dst, &req->dlen, *ctx);
- if (!ret) {
- if (!req->dst) {
- req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ crypto_acomp_unlock_stream_bh(stream);
+
+ req->dlen = dlen;
+
+ if (!src_isvirt && src)
+ kunmap_local(src);
+ if (!dst_isvirt) {
+ kunmap_local(dst);
+ dlen += doff;
+ for (;;) {
+ flush_dcache_page(dpage);
+ if (dlen <= PAGE_SIZE)
+ break;
+ dlen -= PAGE_SIZE;
+ dpage++;
}
- scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
- 1);
}
-out:
- spin_unlock(&scratch->lock);
+
return ret;
}
@@ -177,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
+ flush_work(&scomp_scratch_work);
mutex_lock(&scomp_lock);
if (!--scomp_scratch_users)
crypto_scomp_free_scratches();
@@ -204,62 +321,50 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = sgl_free;
- crt->reqsize = sizeof(void *);
return 0;
}
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
+static void crypto_scomp_destroy(struct crypto_alg *alg)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void *ctx;
-
- ctx = crypto_scomp_alloc_ctx(scomp);
- if (IS_ERR(ctx)) {
- kfree(req);
- return NULL;
- }
-
- *req->__ctx = ctx;
+ struct scomp_alg *scomp = __crypto_scomp_alg(alg);
- return req;
-}
-
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
-{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void *ctx = *req->__ctx;
-
- if (ctx)
- crypto_scomp_free_ctx(scomp, ctx);
+ crypto_acomp_free_streams(&scomp->streams);
}
static const struct crypto_type crypto_scomp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_scomp_init_tfm,
+ .destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_scomp_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
.tfmsize = offsetof(struct crypto_scomp, base),
+ .algsize = offsetof(struct scomp_alg, base),
};
+static void scomp_prepare_alg(struct scomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
+
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
+}
+
int crypto_register_scomp(struct scomp_alg *alg)
{
- struct crypto_alg *base = &alg->base;
+ struct crypto_alg *base = &alg->calg.base;
+
+ scomp_prepare_alg(alg);
base->cra_type = &crypto_scomp_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
return crypto_register_alg(base);
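Since scomp_prepare_alg() now advertises CRYPTO_ALG_REQ_VIRT, an scomp-backed acomp transform can be fed plain virtual addresses. A sketch, assuming acomp_request_set_virt() is the request-setup helper paired with the req->svirt/req->dvirt fields used above:

#include <crypto/acompress.h>

static int compress_virt_demo(struct crypto_acomp *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int dlen)
{
	struct acomp_req *req;
	int err;

	req = acomp_request_alloc(tfm);
	if (!req)
		return -ENOMEM;
	acomp_request_set_callback(req, 0, NULL, NULL);
	/* Virtual-address variant of acomp_request_set_params(). */
	acomp_request_set_virt(req, src, dst, slen, dlen);
	err = crypto_acomp_compress(req);
	if (!err)
		err = req->dlen;	/* compressed size on success */
	acomp_request_free(req);
	return err;
}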
diff --git a/crypto/seed.c b/crypto/seed.c
index 27720140820e..815391f213de 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -8,12 +8,12 @@
* Copyright (C) 2007 Korea Information Security Agency (KISA).
*/
+#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#define SEED_NUM_KCONSTANTS 16
#define SEED_KEY_SIZE 16
@@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *keyout = ctx->keysched;
- const __be32 *key = (const __be32 *)in_key;
u32 i, t0, t1, x1, x2, x3, x4;
- x1 = be32_to_cpu(key[0]);
- x2 = be32_to_cpu(key[1]);
- x3 = be32_to_cpu(key[2]);
- x4 = be32_to_cpu(key[3]);
+ x1 = get_unaligned_be32(&in_key[0]);
+ x2 = get_unaligned_be32(&in_key[4]);
+ x3 = get_unaligned_be32(&in_key[8]);
+ x4 = get_unaligned_be32(&in_key[12]);
for (i = 0; i < SEED_NUM_KCONSTANTS; i++) {
t0 = x1 + x3 - KC[i];
@@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 0);
OP(x3, x4, x1, x2, 2);
@@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 28);
OP(x3, x4, x1, x2, 30);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
/* decrypt a block of text */
@@ -402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 30);
OP(x3, x4, x1, x2, 28);
@@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 2);
OP(x3, x4, x1, x2, 0);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
@@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SEED_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct seed_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -466,7 +460,7 @@ static void __exit seed_fini(void)
crypto_unregister_alg(&seed_alg);
}
-subsys_initcall(seed_init);
+module_init(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 0899d527c284..2bae99e33526 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
if (err)
@@ -36,10 +36,9 @@ out:
kfree_sensitive(subreq->iv);
}
-static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
- int err)
+static void seqiv_aead_encrypt_complete(void *data, int err)
{
- struct aead_request *req = base->data;
+ struct aead_request *req = data;
seqiv_aead_encrypt_complete2(req, err);
aead_request_complete(req, err);
@@ -65,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
data = req->base.data;
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
@@ -180,7 +168,7 @@ static void __exit seqiv_module_exit(void)
crypto_unregister_template(&seqiv_tmpl);
}
-subsys_initcall(seqiv_module_init);
+module_init(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 45f98b750053..b21e7606c652 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -7,11 +7,11 @@
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
*/
+#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <asm/unaligned.h>
-#include <linux/crypto.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#include <crypto/serpent.h>
@@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void)
crypto_unregister_alg(&srp_alg);
}
-subsys_initcall(serpent_mod_init);
+module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha1.c b/crypto/sha1.c
new file mode 100644
index 000000000000..4fbf61cf0370
--- /dev/null
+++ b/crypto/sha1.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for SHA-1 and HMAC-SHA1
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ * Copyright 2025 Google LLC
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/sha1.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/*
+ * Export and import functions. crypto_shash wants a particular format that
+ * matches that used by some legacy drivers. It currently is the same as the
+ * library SHA context, except the value in bytecount must be block-aligned and
+ * the remainder must be stored in an extra u8 appended to the struct.
+ */
+
+#define SHA1_SHASH_STATE_SIZE (sizeof(struct sha1_ctx) + 1)
+static_assert(sizeof(struct sha1_ctx) == sizeof(struct sha1_state));
+static_assert(offsetof(struct sha1_ctx, state) == offsetof(struct sha1_state, state));
+static_assert(offsetof(struct sha1_ctx, bytecount) == offsetof(struct sha1_state, count));
+static_assert(offsetof(struct sha1_ctx, buf) == offsetof(struct sha1_state, buffer));
+
+static int __crypto_sha1_export(const struct sha1_ctx *ctx0, void *out)
+{
+ struct sha1_ctx ctx = *ctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = ctx.bytecount % SHA1_BLOCK_SIZE;
+ ctx.bytecount -= partial;
+ memcpy(p, &ctx, sizeof(ctx));
+ p += sizeof(ctx);
+ *p = partial;
+ return 0;
+}
+
+static int __crypto_sha1_import(struct sha1_ctx *ctx, const void *in)
+{
+ const u8 *p = in;
+
+ memcpy(ctx, p, sizeof(*ctx));
+ p += sizeof(*ctx);
+ ctx->bytecount += *p;
+ return 0;
+}
+
+static int __crypto_sha1_export_core(const struct sha1_ctx *ctx, void *out)
+{
+ memcpy(out, ctx, offsetof(struct sha1_ctx, buf));
+ return 0;
+}
+
+static int __crypto_sha1_import_core(struct sha1_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct sha1_ctx, buf));
+ return 0;
+}
+
+const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
+
+#define SHA1_CTX(desc) ((struct sha1_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha1_init(struct shash_desc *desc)
+{
+ sha1_init(SHA1_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha1_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ sha1_update(SHA1_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ sha1_final(SHA1_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha1_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha1(data, len, out);
+ return 0;
+}
+
+static int crypto_sha1_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha1_export(SHA1_CTX(desc), out);
+}
+
+static int crypto_sha1_import(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha1_import(SHA1_CTX(desc), in);
+}
+
+static int crypto_sha1_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha1_export_core(SHA1_CTX(desc), out);
+}
+
+static int crypto_sha1_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha1_import_core(SHA1_CTX(desc), in);
+}
+
+#define HMAC_SHA1_KEY(tfm) ((struct hmac_sha1_key *)crypto_shash_ctx(tfm))
+#define HMAC_SHA1_CTX(desc) ((struct hmac_sha1_ctx *)shash_desc_ctx(desc))
+
+static int crypto_hmac_sha1_setkey(struct crypto_shash *tfm,
+ const u8 *raw_key, unsigned int keylen)
+{
+ hmac_sha1_preparekey(HMAC_SHA1_KEY(tfm), raw_key, keylen);
+ return 0;
+}
+
+static int crypto_hmac_sha1_init(struct shash_desc *desc)
+{
+ hmac_sha1_init(HMAC_SHA1_CTX(desc), HMAC_SHA1_KEY(desc->tfm));
+ return 0;
+}
+
+static int crypto_hmac_sha1_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ hmac_sha1_update(HMAC_SHA1_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_hmac_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ hmac_sha1_final(HMAC_SHA1_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_hmac_sha1_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ hmac_sha1(HMAC_SHA1_KEY(desc->tfm), data, len, out);
+ return 0;
+}
+
+static int crypto_hmac_sha1_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha1_export(&HMAC_SHA1_CTX(desc)->sha_ctx, out);
+}
+
+static int crypto_hmac_sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha1_ctx *ctx = HMAC_SHA1_CTX(desc);
+
+ ctx->ostate = HMAC_SHA1_KEY(desc->tfm)->ostate;
+ return __crypto_sha1_import(&ctx->sha_ctx, in);
+}
+
+static int crypto_hmac_sha1_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha1_export_core(&HMAC_SHA1_CTX(desc)->sha_ctx, out);
+}
+
+static int crypto_hmac_sha1_import_core(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha1_ctx *ctx = HMAC_SHA1_CTX(desc);
+
+ ctx->ostate = HMAC_SHA1_KEY(desc->tfm)->ostate;
+ return __crypto_sha1_import_core(&ctx->sha_ctx, in);
+}
+
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "sha1",
+ .base.cra_driver_name = "sha1-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA1_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = crypto_sha1_init,
+ .update = crypto_sha1_update,
+ .final = crypto_sha1_final,
+ .digest = crypto_sha1_digest,
+ .export = crypto_sha1_export,
+ .import = crypto_sha1_import,
+ .export_core = crypto_sha1_export_core,
+ .import_core = crypto_sha1_import_core,
+ .descsize = sizeof(struct sha1_ctx),
+ .statesize = SHA1_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "hmac(sha1)",
+ .base.cra_driver_name = "hmac-sha1-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA1_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct hmac_sha1_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA1_DIGEST_SIZE,
+ .setkey = crypto_hmac_sha1_setkey,
+ .init = crypto_hmac_sha1_init,
+ .update = crypto_hmac_sha1_update,
+ .final = crypto_hmac_sha1_final,
+ .digest = crypto_hmac_sha1_digest,
+ .export = crypto_hmac_sha1_export,
+ .import = crypto_hmac_sha1_import,
+ .export_core = crypto_hmac_sha1_export_core,
+ .import_core = crypto_hmac_sha1_import_core,
+ .descsize = sizeof(struct hmac_sha1_ctx),
+ .statesize = SHA1_SHASH_STATE_SIZE,
+ },
+};
+
+static int __init crypto_sha1_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_sha1_mod_init);
+
+static void __exit crypto_sha1_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_sha1_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for SHA-1 and HMAC-SHA1");
+
+MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-lib");
+MODULE_ALIAS_CRYPTO("hmac(sha1)");
+MODULE_ALIAS_CRYPTO("hmac-sha1-lib");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
deleted file mode 100644
index 325b57fe28dc..000000000000
--- a/crypto/sha1_generic.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * SHA1 Secure Hash Algorithm.
- *
- * Derived from cryptoapi implementation, adapted for in-place
- * scatterlist interface.
- *
- * Copyright (c) Alan Smithee.
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha1.h>
-#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-
-const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
- 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
- 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
- 0xaf, 0xd8, 0x07, 0x09
-};
-EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
-
-static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
- int blocks)
-{
- u32 temp[SHA1_WORKSPACE_WORDS];
-
- while (blocks--) {
- sha1_transform(sst->state, src, temp);
- src += SHA1_BLOCK_SIZE;
- }
- memzero_explicit(temp, sizeof(temp));
-}
-
-int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
-}
-EXPORT_SYMBOL(crypto_sha1_update);
-
-static int sha1_final(struct shash_desc *desc, u8 *out)
-{
- sha1_base_do_finalize(desc, sha1_generic_block_fn);
- return sha1_base_finish(desc, out);
-}
-
-int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
- return sha1_final(desc, out);
-}
-EXPORT_SYMBOL(crypto_sha1_finup);
-
-static struct shash_alg alg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_base_init,
- .update = crypto_sha1_update,
- .final = sha1_final,
- .finup = crypto_sha1_finup,
- .descsize = sizeof(struct sha1_state),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name= "sha1-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init sha1_generic_mod_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit sha1_generic_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-subsys_initcall(sha1_generic_mod_init);
-module_exit(sha1_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha1");
-MODULE_ALIAS_CRYPTO("sha1-generic");
diff --git a/crypto/sha256.c b/crypto/sha256.c
new file mode 100644
index 000000000000..fb81defe084c
--- /dev/null
+++ b/crypto/sha256.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ * Copyright 2025 Google LLC
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/*
+ * Export and import functions. crypto_shash wants a particular format that
+ * matches that used by some legacy drivers. It currently is the same as the
+ * library SHA context, except the value in bytecount must be block-aligned and
+ * the remainder must be stored in an extra u8 appended to the struct.
+ */
+
+#define SHA256_SHASH_STATE_SIZE 105
+static_assert(offsetof(struct __sha256_ctx, state) == 0);
+static_assert(offsetof(struct __sha256_ctx, bytecount) == 32);
+static_assert(offsetof(struct __sha256_ctx, buf) == 40);
+static_assert(sizeof(struct __sha256_ctx) + 1 == SHA256_SHASH_STATE_SIZE);
+
+static int __crypto_sha256_export(const struct __sha256_ctx *ctx0, void *out)
+{
+ struct __sha256_ctx ctx = *ctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = ctx.bytecount % SHA256_BLOCK_SIZE;
+ ctx.bytecount -= partial;
+ memcpy(p, &ctx, sizeof(ctx));
+ p += sizeof(ctx);
+ *p = partial;
+ return 0;
+}
+
+static int __crypto_sha256_import(struct __sha256_ctx *ctx, const void *in)
+{
+ const u8 *p = in;
+
+ memcpy(ctx, p, sizeof(*ctx));
+ p += sizeof(*ctx);
+ ctx->bytecount += *p;
+ return 0;
+}
+
+static int __crypto_sha256_export_core(const struct __sha256_ctx *ctx,
+ void *out)
+{
+ memcpy(out, ctx, offsetof(struct __sha256_ctx, buf));
+ return 0;
+}
+
+static int __crypto_sha256_import_core(struct __sha256_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct __sha256_ctx, buf));
+ return 0;
+}
+
+/* SHA-224 */
+
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+#define SHA224_CTX(desc) ((struct sha224_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha224_init(struct shash_desc *desc)
+{
+ sha224_init(SHA224_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha224_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ sha224_update(SHA224_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha224_final(struct shash_desc *desc, u8 *out)
+{
+ sha224_final(SHA224_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha224_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha224(data, len, out);
+ return 0;
+}
+
+static int crypto_sha224_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export(&SHA224_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha224_import(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha256_import(&SHA224_CTX(desc)->ctx, in);
+}
+
+static int crypto_sha224_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&SHA224_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha224_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha256_import_core(&SHA224_CTX(desc)->ctx, in);
+}
+
+/* SHA-256 */
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
+#define SHA256_CTX(desc) ((struct sha256_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha256_init(struct shash_desc *desc)
+{
+ sha256_init(SHA256_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha256_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ sha256_update(SHA256_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ sha256_final(SHA256_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha256_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha256(data, len, out);
+ return 0;
+}
+
+static int crypto_sha256_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export(&SHA256_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha256_import(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha256_import(&SHA256_CTX(desc)->ctx, in);
+}
+
+static int crypto_sha256_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&SHA256_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha256_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha256_import_core(&SHA256_CTX(desc)->ctx, in);
+}
+
+/* HMAC-SHA224 */
+
+#define HMAC_SHA224_KEY(tfm) ((struct hmac_sha224_key *)crypto_shash_ctx(tfm))
+#define HMAC_SHA224_CTX(desc) ((struct hmac_sha224_ctx *)shash_desc_ctx(desc))
+
+static int crypto_hmac_sha224_setkey(struct crypto_shash *tfm,
+ const u8 *raw_key, unsigned int keylen)
+{
+ hmac_sha224_preparekey(HMAC_SHA224_KEY(tfm), raw_key, keylen);
+ return 0;
+}
+
+static int crypto_hmac_sha224_init(struct shash_desc *desc)
+{
+ hmac_sha224_init(HMAC_SHA224_CTX(desc), HMAC_SHA224_KEY(desc->tfm));
+ return 0;
+}
+
+static int crypto_hmac_sha224_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ hmac_sha224_update(HMAC_SHA224_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_hmac_sha224_final(struct shash_desc *desc, u8 *out)
+{
+ hmac_sha224_final(HMAC_SHA224_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_hmac_sha224_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ u8 *out)
+{
+ hmac_sha224(HMAC_SHA224_KEY(desc->tfm), data, len, out);
+ return 0;
+}
+
+static int crypto_hmac_sha224_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export(&HMAC_SHA224_CTX(desc)->ctx.sha_ctx, out);
+}
+
+static int crypto_hmac_sha224_import(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha224_ctx *ctx = HMAC_SHA224_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA224_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha256_import(&ctx->ctx.sha_ctx, in);
+}
+
+static int crypto_hmac_sha224_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&HMAC_SHA224_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha224_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha224_ctx *ctx = HMAC_SHA224_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA224_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha256_import_core(&ctx->ctx.sha_ctx, in);
+}
+
+/* HMAC-SHA256 */
+
+#define HMAC_SHA256_KEY(tfm) ((struct hmac_sha256_key *)crypto_shash_ctx(tfm))
+#define HMAC_SHA256_CTX(desc) ((struct hmac_sha256_ctx *)shash_desc_ctx(desc))
+
+static int crypto_hmac_sha256_setkey(struct crypto_shash *tfm,
+ const u8 *raw_key, unsigned int keylen)
+{
+ hmac_sha256_preparekey(HMAC_SHA256_KEY(tfm), raw_key, keylen);
+ return 0;
+}
+
+static int crypto_hmac_sha256_init(struct shash_desc *desc)
+{
+ hmac_sha256_init(HMAC_SHA256_CTX(desc), HMAC_SHA256_KEY(desc->tfm));
+ return 0;
+}
+
+static int crypto_hmac_sha256_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ hmac_sha256_update(HMAC_SHA256_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_hmac_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ hmac_sha256_final(HMAC_SHA256_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_hmac_sha256_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ u8 *out)
+{
+ hmac_sha256(HMAC_SHA256_KEY(desc->tfm), data, len, out);
+ return 0;
+}
+
+static int crypto_hmac_sha256_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export(&HMAC_SHA256_CTX(desc)->ctx.sha_ctx, out);
+}
+
+static int crypto_hmac_sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha256_ctx *ctx = HMAC_SHA256_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA256_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha256_import(&ctx->ctx.sha_ctx, in);
+}
+
+static int crypto_hmac_sha256_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&HMAC_SHA256_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha256_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha256_ctx *ctx = HMAC_SHA256_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA256_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha256_import_core(&ctx->ctx.sha_ctx, in);
+}
+
+/* Algorithm definitions */
+
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha224_update,
+ .final = crypto_sha224_final,
+ .digest = crypto_sha224_digest,
+ .export = crypto_sha224_export,
+ .import = crypto_sha224_import,
+ .export_core = crypto_sha224_export_core,
+ .import_core = crypto_sha224_import_core,
+ .descsize = sizeof(struct sha224_ctx),
+ .statesize = SHA256_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update,
+ .final = crypto_sha256_final,
+ .digest = crypto_sha256_digest,
+ .export = crypto_sha256_export,
+ .import = crypto_sha256_import,
+ .export_core = crypto_sha256_export_core,
+ .import_core = crypto_sha256_import_core,
+ .descsize = sizeof(struct sha256_ctx),
+ .statesize = SHA256_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "hmac(sha224)",
+ .base.cra_driver_name = "hmac-sha224-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct hmac_sha224_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .setkey = crypto_hmac_sha224_setkey,
+ .init = crypto_hmac_sha224_init,
+ .update = crypto_hmac_sha224_update,
+ .final = crypto_hmac_sha224_final,
+ .digest = crypto_hmac_sha224_digest,
+ .export = crypto_hmac_sha224_export,
+ .import = crypto_hmac_sha224_import,
+ .export_core = crypto_hmac_sha224_export_core,
+ .import_core = crypto_hmac_sha224_import_core,
+ .descsize = sizeof(struct hmac_sha224_ctx),
+ .statesize = SHA256_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "hmac(sha256)",
+ .base.cra_driver_name = "hmac-sha256-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct hmac_sha256_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .setkey = crypto_hmac_sha256_setkey,
+ .init = crypto_hmac_sha256_init,
+ .update = crypto_hmac_sha256_update,
+ .final = crypto_hmac_sha256_final,
+ .digest = crypto_hmac_sha256_digest,
+ .export = crypto_hmac_sha256_export,
+ .import = crypto_hmac_sha256_import,
+ .export_core = crypto_hmac_sha256_export_core,
+ .import_core = crypto_hmac_sha256_import_core,
+ .descsize = sizeof(struct hmac_sha256_ctx),
+ .statesize = SHA256_SHASH_STATE_SIZE,
+ },
+};
+
+static int __init crypto_sha256_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_sha256_mod_init);
+
+static void __exit crypto_sha256_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_sha256_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256");
+
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-lib");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-lib");
+MODULE_ALIAS_CRYPTO("hmac(sha224)");
+MODULE_ALIAS_CRYPTO("hmac-sha224-lib");
+MODULE_ALIAS_CRYPTO("hmac(sha256)");
+MODULE_ALIAS_CRYPTO("hmac-sha256-lib");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
deleted file mode 100644
index bf147b01e313..000000000000
--- a/crypto/sha256_generic.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
- 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
- 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
- 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
- 0x2f
-};
-EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
-
-const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
- 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
- 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
- 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
- 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
-};
-EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
-
-int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha256_update);
-
-static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
-{
- if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
- sha224_final(shash_desc_ctx(desc), out);
- else
- sha256_final(shash_desc_ctx(desc), out);
- return 0;
-}
-
-int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return crypto_sha256_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha256_finup);
-
-static struct shash_alg sha256_algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha256_generic_mod_init(void)
-{
- return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-static void __exit sha256_generic_mod_fini(void)
-{
- crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-subsys_initcall(sha256_generic_mod_init);
-module_exit(sha256_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-generic");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha3.c b/crypto/sha3.c
new file mode 100644
index 000000000000..8f364979ec89
--- /dev/null
+++ b/crypto/sha3.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for SHA-3
+ * (https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf)
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define SHA3_CTX(desc) ((struct sha3_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha3_224_init(struct shash_desc *desc)
+{
+ sha3_224_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_256_init(struct shash_desc *desc)
+{
+ sha3_256_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_384_init(struct shash_desc *desc)
+{
+ sha3_384_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_512_init(struct shash_desc *desc)
+{
+ sha3_512_init(SHA3_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ sha3_update(SHA3_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha3_final(struct shash_desc *desc, u8 *out)
+{
+ sha3_final(SHA3_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha3_224_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_224(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_256_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_256(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_384_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_384(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_512_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha3_512(data, len, out);
+ return 0;
+}
+
+static int crypto_sha3_export_core(struct shash_desc *desc, void *out)
+{
+ memcpy(out, SHA3_CTX(desc), sizeof(struct sha3_ctx));
+ return 0;
+}
+
+static int crypto_sha3_import_core(struct shash_desc *desc, const void *in)
+{
+ memcpy(SHA3_CTX(desc), in, sizeof(struct sha3_ctx));
+ return 0;
+}
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_224_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_224_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-lib",
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_256_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_256_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-lib",
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_384_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_384_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-lib",
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_512_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .digest = crypto_sha3_512_digest,
+ .export_core = crypto_sha3_export_core,
+ .import_core = crypto_sha3_import_core,
+ .descsize = sizeof(struct sha3_ctx),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-lib",
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init crypto_sha3_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_sha3_mod_init);
+
+static void __exit crypto_sha3_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_sha3_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for SHA-3");
+
+MODULE_ALIAS_CRYPTO("sha3-224");
+MODULE_ALIAS_CRYPTO("sha3-224-lib");
+MODULE_ALIAS_CRYPTO("sha3-256");
+MODULE_ALIAS_CRYPTO("sha3-256-lib");
+MODULE_ALIAS_CRYPTO("sha3-384");
+MODULE_ALIAS_CRYPTO("sha3-384-lib");
+MODULE_ALIAS_CRYPTO("sha3-512");
+MODULE_ALIAS_CRYPTO("sha3-512-lib");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
deleted file mode 100644
index 3e4069935b53..000000000000
--- a/crypto/sha3_generic.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * SHA-3, as specified in
- * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
- *
- * SHA-3 code by Jeff Garzik <jeff@garzik.org>
- * Ard Biesheuvel <ard.biesheuvel@linaro.org>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <crypto/sha3.h>
-#include <asm/unaligned.h>
-
-/*
- * On some 32-bit architectures (h8300), GCC ends up using
- * over 1 KB of stack if we inline the round calculation into the loop
- * in keccakf(). On the other hand, on 64-bit architectures with plenty
- * of [64-bit wide] general purpose registers, not inlining it severely
- * hurts performance. So let's use 64-bitness as a heuristic to decide
- * whether to inline or not.
- */
-#ifdef CONFIG_64BIT
-#define SHA3_INLINE inline
-#else
-#define SHA3_INLINE noinline
-#endif
-
-#define KECCAK_ROUNDS 24
-
-static const u64 keccakf_rndc[24] = {
- 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
- 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
- 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
- 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
- 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
- 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
- 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
- 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
-};
-
-/* update the state with given number of rounds */
-
-static SHA3_INLINE void keccakf_round(u64 st[25])
-{
- u64 t[5], tt, bc[5];
-
- /* Theta */
- bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
- bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
- bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
- bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
- bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
-
- t[0] = bc[4] ^ rol64(bc[1], 1);
- t[1] = bc[0] ^ rol64(bc[2], 1);
- t[2] = bc[1] ^ rol64(bc[3], 1);
- t[3] = bc[2] ^ rol64(bc[4], 1);
- t[4] = bc[3] ^ rol64(bc[0], 1);
-
- st[0] ^= t[0];
-
- /* Rho Pi */
- tt = st[1];
- st[ 1] = rol64(st[ 6] ^ t[1], 44);
- st[ 6] = rol64(st[ 9] ^ t[4], 20);
- st[ 9] = rol64(st[22] ^ t[2], 61);
- st[22] = rol64(st[14] ^ t[4], 39);
- st[14] = rol64(st[20] ^ t[0], 18);
- st[20] = rol64(st[ 2] ^ t[2], 62);
- st[ 2] = rol64(st[12] ^ t[2], 43);
- st[12] = rol64(st[13] ^ t[3], 25);
- st[13] = rol64(st[19] ^ t[4], 8);
- st[19] = rol64(st[23] ^ t[3], 56);
- st[23] = rol64(st[15] ^ t[0], 41);
- st[15] = rol64(st[ 4] ^ t[4], 27);
- st[ 4] = rol64(st[24] ^ t[4], 14);
- st[24] = rol64(st[21] ^ t[1], 2);
- st[21] = rol64(st[ 8] ^ t[3], 55);
- st[ 8] = rol64(st[16] ^ t[1], 45);
- st[16] = rol64(st[ 5] ^ t[0], 36);
- st[ 5] = rol64(st[ 3] ^ t[3], 28);
- st[ 3] = rol64(st[18] ^ t[3], 21);
- st[18] = rol64(st[17] ^ t[2], 15);
- st[17] = rol64(st[11] ^ t[1], 10);
- st[11] = rol64(st[ 7] ^ t[2], 6);
- st[ 7] = rol64(st[10] ^ t[0], 3);
- st[10] = rol64( tt ^ t[1], 1);
-
- /* Chi */
- bc[ 0] = ~st[ 1] & st[ 2];
- bc[ 1] = ~st[ 2] & st[ 3];
- bc[ 2] = ~st[ 3] & st[ 4];
- bc[ 3] = ~st[ 4] & st[ 0];
- bc[ 4] = ~st[ 0] & st[ 1];
- st[ 0] ^= bc[ 0];
- st[ 1] ^= bc[ 1];
- st[ 2] ^= bc[ 2];
- st[ 3] ^= bc[ 3];
- st[ 4] ^= bc[ 4];
-
- bc[ 0] = ~st[ 6] & st[ 7];
- bc[ 1] = ~st[ 7] & st[ 8];
- bc[ 2] = ~st[ 8] & st[ 9];
- bc[ 3] = ~st[ 9] & st[ 5];
- bc[ 4] = ~st[ 5] & st[ 6];
- st[ 5] ^= bc[ 0];
- st[ 6] ^= bc[ 1];
- st[ 7] ^= bc[ 2];
- st[ 8] ^= bc[ 3];
- st[ 9] ^= bc[ 4];
-
- bc[ 0] = ~st[11] & st[12];
- bc[ 1] = ~st[12] & st[13];
- bc[ 2] = ~st[13] & st[14];
- bc[ 3] = ~st[14] & st[10];
- bc[ 4] = ~st[10] & st[11];
- st[10] ^= bc[ 0];
- st[11] ^= bc[ 1];
- st[12] ^= bc[ 2];
- st[13] ^= bc[ 3];
- st[14] ^= bc[ 4];
-
- bc[ 0] = ~st[16] & st[17];
- bc[ 1] = ~st[17] & st[18];
- bc[ 2] = ~st[18] & st[19];
- bc[ 3] = ~st[19] & st[15];
- bc[ 4] = ~st[15] & st[16];
- st[15] ^= bc[ 0];
- st[16] ^= bc[ 1];
- st[17] ^= bc[ 2];
- st[18] ^= bc[ 3];
- st[19] ^= bc[ 4];
-
- bc[ 0] = ~st[21] & st[22];
- bc[ 1] = ~st[22] & st[23];
- bc[ 2] = ~st[23] & st[24];
- bc[ 3] = ~st[24] & st[20];
- bc[ 4] = ~st[20] & st[21];
- st[20] ^= bc[ 0];
- st[21] ^= bc[ 1];
- st[22] ^= bc[ 2];
- st[23] ^= bc[ 3];
- st[24] ^= bc[ 4];
-}
-
-static void keccakf(u64 st[25])
-{
- int round;
-
- for (round = 0; round < KECCAK_ROUNDS; round++) {
- keccakf_round(st);
- /* Iota */
- st[0] ^= keccakf_rndc[round];
- }
-}
-
-int crypto_sha3_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
-
- sctx->rsiz = 200 - 2 * digest_size;
- sctx->rsizw = sctx->rsiz / 8;
- sctx->partial = 0;
-
- memset(sctx->st, 0, sizeof(sctx->st));
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha3_init);
-
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int done;
- const u8 *src;
-
- done = 0;
- src = data;
-
- if ((sctx->partial + len) > (sctx->rsiz - 1)) {
- if (sctx->partial) {
- done = -sctx->partial;
- memcpy(sctx->buf + sctx->partial, data,
- done + sctx->rsiz);
- src = sctx->buf;
- }
-
- do {
- unsigned int i;
-
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
- keccakf(sctx->st);
-
- done += sctx->rsiz;
- src = data + done;
- } while (done + (sctx->rsiz - 1) < len);
-
- sctx->partial = 0;
- }
- memcpy(sctx->buf + sctx->partial, src, len - done);
- sctx->partial += (len - done);
-
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha3_update);
-
-int crypto_sha3_final(struct shash_desc *desc, u8 *out)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int i, inlen = sctx->partial;
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- __le64 *digest = (__le64 *)out;
-
- sctx->buf[inlen++] = 0x06;
- memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
- sctx->buf[sctx->rsiz - 1] |= 0x80;
-
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
-
- keccakf(sctx->st);
-
- for (i = 0; i < digest_size / 8; i++)
- put_unaligned_le64(sctx->st[i], digest++);
-
- if (digest_size & 4)
- put_unaligned_le32(sctx->st[i], (__le32 *)digest);
-
- memset(sctx, 0, sizeof(*sctx));
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha3_final);
-
-static struct shash_alg algs[] = { {
- .digestsize = SHA3_224_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base.cra_name = "sha3-224",
- .base.cra_driver_name = "sha3-224-generic",
- .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA3_256_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base.cra_name = "sha3-256",
- .base.cra_driver_name = "sha3-256-generic",
- .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA3_384_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base.cra_name = "sha3-384",
- .base.cra_driver_name = "sha3-384-generic",
- .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = SHA3_512_DIGEST_SIZE,
- .init = crypto_sha3_init,
- .update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base.cra_name = "sha3-512",
- .base.cra_driver_name = "sha3-512-generic",
- .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init sha3_generic_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha3_generic_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-subsys_initcall(sha3_generic_mod_init);
-module_exit(sha3_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha3-224");
-MODULE_ALIAS_CRYPTO("sha3-224-generic");
-MODULE_ALIAS_CRYPTO("sha3-256");
-MODULE_ALIAS_CRYPTO("sha3-256-generic");
-MODULE_ALIAS_CRYPTO("sha3-384");
-MODULE_ALIAS_CRYPTO("sha3-384-generic");
-MODULE_ALIAS_CRYPTO("sha3-512");
-MODULE_ALIAS_CRYPTO("sha3-512-generic");
diff --git a/crypto/sha512.c b/crypto/sha512.c
new file mode 100644
index 000000000000..d320fe53913f
--- /dev/null
+++ b/crypto/sha512.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API support for SHA-384, SHA-512, HMAC-SHA384, and HMAC-SHA512
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
+ * Copyright 2025 Google LLC
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/*
+ * Export and import functions. crypto_shash requires a particular format that
+ * matches the one used by some legacy drivers. It is currently the same as the
+ * library SHA context, except that the value in bytecount_lo must be
+ * block-aligned and the remainder must be stored in an extra u8 appended to
+ * the struct.
+ */
+
+#define SHA512_SHASH_STATE_SIZE 209
+static_assert(offsetof(struct __sha512_ctx, state) == 0);
+static_assert(offsetof(struct __sha512_ctx, bytecount_lo) == 64);
+static_assert(offsetof(struct __sha512_ctx, bytecount_hi) == 72);
+static_assert(offsetof(struct __sha512_ctx, buf) == 80);
+static_assert(sizeof(struct __sha512_ctx) + 1 == SHA512_SHASH_STATE_SIZE);
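+
+/*
+ * Resulting export layout (209 bytes), as implied by the asserts above:
+ *
+ *	[  0.. 63]	state (eight u64 words)
+ *	[ 64.. 71]	bytecount_lo, rounded down to a block-size multiple
+ *	[ 72.. 79]	bytecount_hi
+ *	[ 80..207]	buf (partial block)
+ *	[208]		bytecount_lo % SHA512_BLOCK_SIZE (the appended u8)
+ */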
+
+static int __crypto_sha512_export(const struct __sha512_ctx *ctx0, void *out)
+{
+ struct __sha512_ctx ctx = *ctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = ctx.bytecount_lo % SHA512_BLOCK_SIZE;
+ ctx.bytecount_lo -= partial;
+ memcpy(p, &ctx, sizeof(ctx));
+ p += sizeof(ctx);
+ *p = partial;
+ return 0;
+}
+
+static int __crypto_sha512_import(struct __sha512_ctx *ctx, const void *in)
+{
+ const u8 *p = in;
+
+ memcpy(ctx, p, sizeof(*ctx));
+ p += sizeof(*ctx);
+ ctx->bytecount_lo += *p;
+ return 0;
+}
+
+static int __crypto_sha512_export_core(const struct __sha512_ctx *ctx,
+ void *out)
+{
+ memcpy(out, ctx, offsetof(struct __sha512_ctx, buf));
+ return 0;
+}
+
+static int __crypto_sha512_import_core(struct __sha512_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct __sha512_ctx, buf));
+ return 0;
+}
+
+/* SHA-384 */
+
+const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
+ 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+ 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+ 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+ 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+ 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+ 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
+};
+EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
+
+#define SHA384_CTX(desc) ((struct sha384_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha384_init(struct shash_desc *desc)
+{
+ sha384_init(SHA384_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha384_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ sha384_update(SHA384_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha384_final(struct shash_desc *desc, u8 *out)
+{
+ sha384_final(SHA384_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha384_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha384(data, len, out);
+ return 0;
+}
+
+static int crypto_sha384_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export(&SHA384_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha384_import(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha512_import(&SHA384_CTX(desc)->ctx, in);
+}
+
+static int crypto_sha384_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&SHA384_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha384_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha512_import_core(&SHA384_CTX(desc)->ctx, in);
+}
+
+/* SHA-512 */
+
+const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
+ 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+ 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+ 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+ 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+ 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+ 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+ 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+ 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
+};
+EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
+
+#define SHA512_CTX(desc) ((struct sha512_ctx *)shash_desc_ctx(desc))
+
+static int crypto_sha512_init(struct shash_desc *desc)
+{
+ sha512_init(SHA512_CTX(desc));
+ return 0;
+}
+
+static int crypto_sha512_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ sha512_update(SHA512_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha512_final(struct shash_desc *desc, u8 *out)
+{
+ sha512_final(SHA512_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_sha512_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ sha512(data, len, out);
+ return 0;
+}
+
+static int crypto_sha512_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export(&SHA512_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha512_import(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha512_import(&SHA512_CTX(desc)->ctx, in);
+}
+
+static int crypto_sha512_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&SHA512_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha512_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha512_import_core(&SHA512_CTX(desc)->ctx, in);
+}
+
+/* HMAC-SHA384 */
+
+#define HMAC_SHA384_KEY(tfm) ((struct hmac_sha384_key *)crypto_shash_ctx(tfm))
+#define HMAC_SHA384_CTX(desc) ((struct hmac_sha384_ctx *)shash_desc_ctx(desc))
+
+static int crypto_hmac_sha384_setkey(struct crypto_shash *tfm,
+ const u8 *raw_key, unsigned int keylen)
+{
+ hmac_sha384_preparekey(HMAC_SHA384_KEY(tfm), raw_key, keylen);
+ return 0;
+}
+
+static int crypto_hmac_sha384_init(struct shash_desc *desc)
+{
+ hmac_sha384_init(HMAC_SHA384_CTX(desc), HMAC_SHA384_KEY(desc->tfm));
+ return 0;
+}
+
+static int crypto_hmac_sha384_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ hmac_sha384_update(HMAC_SHA384_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_hmac_sha384_final(struct shash_desc *desc, u8 *out)
+{
+ hmac_sha384_final(HMAC_SHA384_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_hmac_sha384_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ u8 *out)
+{
+ hmac_sha384(HMAC_SHA384_KEY(desc->tfm), data, len, out);
+ return 0;
+}
+
+static int crypto_hmac_sha384_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export(&HMAC_SHA384_CTX(desc)->ctx.sha_ctx, out);
+}
+
+static int crypto_hmac_sha384_import(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha384_ctx *ctx = HMAC_SHA384_CTX(desc);
+
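+	/*
+	 * Only the inner hash state is serialized on export; the outer state
+	 * is reconstituted from the prepared key before importing it.
+	 */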
+ ctx->ctx.ostate = HMAC_SHA384_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha512_import(&ctx->ctx.sha_ctx, in);
+}
+
+static int crypto_hmac_sha384_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&HMAC_SHA384_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha384_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha384_ctx *ctx = HMAC_SHA384_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA384_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha512_import_core(&ctx->ctx.sha_ctx, in);
+}
+
+/* HMAC-SHA512 */
+
+#define HMAC_SHA512_KEY(tfm) ((struct hmac_sha512_key *)crypto_shash_ctx(tfm))
+#define HMAC_SHA512_CTX(desc) ((struct hmac_sha512_ctx *)shash_desc_ctx(desc))
+
+static int crypto_hmac_sha512_setkey(struct crypto_shash *tfm,
+ const u8 *raw_key, unsigned int keylen)
+{
+ hmac_sha512_preparekey(HMAC_SHA512_KEY(tfm), raw_key, keylen);
+ return 0;
+}
+
+static int crypto_hmac_sha512_init(struct shash_desc *desc)
+{
+ hmac_sha512_init(HMAC_SHA512_CTX(desc), HMAC_SHA512_KEY(desc->tfm));
+ return 0;
+}
+
+static int crypto_hmac_sha512_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ hmac_sha512_update(HMAC_SHA512_CTX(desc), data, len);
+ return 0;
+}
+
+static int crypto_hmac_sha512_final(struct shash_desc *desc, u8 *out)
+{
+ hmac_sha512_final(HMAC_SHA512_CTX(desc), out);
+ return 0;
+}
+
+static int crypto_hmac_sha512_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ u8 *out)
+{
+ hmac_sha512(HMAC_SHA512_KEY(desc->tfm), data, len, out);
+ return 0;
+}
+
+static int crypto_hmac_sha512_export(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export(&HMAC_SHA512_CTX(desc)->ctx.sha_ctx, out);
+}
+
+static int crypto_hmac_sha512_import(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha512_ctx *ctx = HMAC_SHA512_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA512_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha512_import(&ctx->ctx.sha_ctx, in);
+}
+
+static int crypto_hmac_sha512_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&HMAC_SHA512_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha512_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha512_ctx *ctx = HMAC_SHA512_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA512_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha512_import_core(&ctx->ctx.sha_ctx, in);
+}
+
+/* Algorithm definitions */
+
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "sha384",
+ .base.cra_driver_name = "sha384-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA384_DIGEST_SIZE,
+ .init = crypto_sha384_init,
+ .update = crypto_sha384_update,
+ .final = crypto_sha384_final,
+ .digest = crypto_sha384_digest,
+ .export = crypto_sha384_export,
+ .import = crypto_sha384_import,
+ .export_core = crypto_sha384_export_core,
+ .import_core = crypto_sha384_import_core,
+ .descsize = sizeof(struct sha384_ctx),
+ .statesize = SHA512_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "sha512",
+ .base.cra_driver_name = "sha512-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = crypto_sha512_init,
+ .update = crypto_sha512_update,
+ .final = crypto_sha512_final,
+ .digest = crypto_sha512_digest,
+ .export = crypto_sha512_export,
+ .import = crypto_sha512_import,
+ .export_core = crypto_sha512_export_core,
+ .import_core = crypto_sha512_import_core,
+ .descsize = sizeof(struct sha512_ctx),
+ .statesize = SHA512_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "hmac(sha384)",
+ .base.cra_driver_name = "hmac-sha384-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct hmac_sha384_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA384_DIGEST_SIZE,
+ .setkey = crypto_hmac_sha384_setkey,
+ .init = crypto_hmac_sha384_init,
+ .update = crypto_hmac_sha384_update,
+ .final = crypto_hmac_sha384_final,
+ .digest = crypto_hmac_sha384_digest,
+ .export = crypto_hmac_sha384_export,
+ .import = crypto_hmac_sha384_import,
+ .export_core = crypto_hmac_sha384_export_core,
+ .import_core = crypto_hmac_sha384_import_core,
+ .descsize = sizeof(struct hmac_sha384_ctx),
+ .statesize = SHA512_SHASH_STATE_SIZE,
+ },
+ {
+ .base.cra_name = "hmac(sha512)",
+ .base.cra_driver_name = "hmac-sha512-lib",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct hmac_sha512_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA512_DIGEST_SIZE,
+ .setkey = crypto_hmac_sha512_setkey,
+ .init = crypto_hmac_sha512_init,
+ .update = crypto_hmac_sha512_update,
+ .final = crypto_hmac_sha512_final,
+ .digest = crypto_hmac_sha512_digest,
+ .export = crypto_hmac_sha512_export,
+ .import = crypto_hmac_sha512_import,
+ .export_core = crypto_hmac_sha512_export_core,
+ .import_core = crypto_hmac_sha512_import_core,
+ .descsize = sizeof(struct hmac_sha512_ctx),
+ .statesize = SHA512_SHASH_STATE_SIZE,
+ },
+};
+
+static int __init crypto_sha512_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+module_init(crypto_sha512_mod_init);
+
+static void __exit crypto_sha512_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+module_exit(crypto_sha512_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API support for SHA-384, SHA-512, HMAC-SHA384, and HMAC-SHA512");
+
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-lib");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-lib");
+MODULE_ALIAS_CRYPTO("hmac(sha384)");
+MODULE_ALIAS_CRYPTO("hmac-sha384-lib");
+MODULE_ALIAS_CRYPTO("hmac(sha512)");
+MODULE_ALIAS_CRYPTO("hmac-sha512-lib");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
deleted file mode 100644
index be70e76d6d86..000000000000
--- a/crypto/sha512_generic.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* SHA-512 code by Jean-Luc Cooke <jlcooke@certainkey.com>
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
- */
-#include <crypto/internal/hash.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha512_base.h>
-#include <linux/percpu.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
- 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
- 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
- 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
- 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
- 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
- 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
-};
-EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
-
-const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
- 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
- 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
- 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
- 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
- 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
- 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
- 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
- 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
-};
-EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
-
-static inline u64 Ch(u64 x, u64 y, u64 z)
-{
- return z ^ (x & (y ^ z));
-}
-
-static inline u64 Maj(u64 x, u64 y, u64 z)
-{
- return (x & y) | (z & (x | y));
-}
-
-static const u64 sha512_K[80] = {
- 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
- 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
- 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
- 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
- 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
- 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
- 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL,
- 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
- 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL,
- 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
- 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL,
- 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
- 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL,
- 0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
- 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
- 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
- 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL,
- 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
- 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL,
- 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
- 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL,
- 0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
- 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL,
- 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
- 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
- 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
- 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
-};
-
-#define e0(x) (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
-#define e1(x) (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
-#define s0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
-#define s1(x) (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
-
-static inline void LOAD_OP(int I, u64 *W, const u8 *input)
-{
- W[I] = get_unaligned_be64((__u64 *)input + I);
-}
-
-static inline void BLEND_OP(int I, u64 *W)
-{
- W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
-}
-
-static void
-sha512_transform(u64 *state, const u8 *input)
-{
- u64 a, b, c, d, e, f, g, h, t1, t2;
-
- int i;
- u64 W[16];
-
- /* load the state into our registers */
- a=state[0]; b=state[1]; c=state[2]; d=state[3];
- e=state[4]; f=state[5]; g=state[6]; h=state[7];
-
- /* now iterate */
- for (i=0; i<80; i+=8) {
- if (!(i & 8)) {
- int j;
-
- if (i < 16) {
- /* load the input */
- for (j = 0; j < 16; j++)
- LOAD_OP(i + j, W, input);
- } else {
- for (j = 0; j < 16; j++) {
- BLEND_OP(i + j, W);
- }
- }
- }
-
- t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
- }
-
- state[0] += a; state[1] += b; state[2] += c; state[3] += d;
- state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-}
-
-static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
- int blocks)
-{
- while (blocks--) {
- sha512_transform(sst->state, src);
- src += SHA512_BLOCK_SIZE;
- }
-}
-
-int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
-}
-EXPORT_SYMBOL(crypto_sha512_update);
-
-static int sha512_final(struct shash_desc *desc, u8 *hash)
-{
- sha512_base_do_finalize(desc, sha512_generic_block_fn);
- return sha512_base_finish(desc, hash);
-}
-
-int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
- return sha512_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha512_finup);
-
-static struct shash_alg sha512_algs[2] = { {
- .digestsize = SHA512_DIGEST_SIZE,
- .init = sha512_base_init,
- .update = crypto_sha512_update,
- .final = sha512_final,
- .finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA384_DIGEST_SIZE,
- .init = sha384_base_init,
- .update = crypto_sha512_update,
- .final = sha512_final,
- .finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "sha384-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha512_generic_mod_init(void)
-{
- return crypto_register_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
-}
-
-static void __exit sha512_generic_mod_fini(void)
-{
- crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
-}
-
-subsys_initcall(sha512_generic_mod_init);
-module_exit(sha512_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
-
-MODULE_ALIAS_CRYPTO("sha384");
-MODULE_ALIAS_CRYPTO("sha384-generic");
-MODULE_ALIAS_CRYPTO("sha512");
-MODULE_ALIAS_CRYPTO("sha512-generic");
diff --git a/crypto/shash.c b/crypto/shash.c
index 4c88e63b3350..4721f5f134f4 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -6,59 +6,40 @@
*/
#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
#include <net/netlink.h>
-#include <linux/compiler.h>
-
-#include "internal.h"
-static const struct crypto_type crypto_shash_type;
+#include "hash.h"
-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+static inline bool crypto_shash_block_only(struct crypto_shash *tfm)
{
- return -ENOSYS;
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
}
-/*
- * Check whether an shash algorithm has a setkey function.
- *
- * For CFI compatibility, this must not be an inline function. This is because
- * when CFI is enabled, modules won't get the same address for shash_no_setkey
- * (if it were exported, which inlining would require) as the core kernel will.
- */
-bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm)
{
- return alg->setkey != shash_no_setkey;
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
}
-EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
-static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+static inline bool crypto_shash_finup_max(struct crypto_shash *tfm)
{
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
- unsigned long absize;
- u8 *buffer, *alignbuffer;
- int err;
-
- absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINUP_MAX;
+}
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- err = shash->setkey(tfm, alignbuffer, keylen);
- kfree_sensitive(buffer);
- return err;
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(shash_no_setkey);
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
@@ -70,14 +51,9 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
- if ((unsigned long)key & alignmask)
- err = shash_setkey_unaligned(tfm, key, keylen);
- else
- err = shash->setkey(tfm, key, keylen);
-
+ err = shash->setkey(tfm, key, keylen);
if (unlikely(err)) {
shash_set_needkey(tfm, shash);
return err;
@@ -88,116 +64,119 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int __crypto_shash_init(struct shash_desc *desc)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
- unsigned int unaligned_len = alignmask + 1 -
- ((unsigned long)data & alignmask);
- /*
- * We cannot count on __aligned() working for large values:
- * https://patchwork.kernel.org/patch/9507697/
- */
- u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2];
- u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
- int err;
-
- if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
- return -EINVAL;
- if (unaligned_len > len)
- unaligned_len = len;
+ if (crypto_shash_block_only(tfm)) {
+ u8 *buf = shash_desc_ctx(desc);
- memcpy(buf, data, unaligned_len);
- err = shash->update(desc, buf, unaligned_len);
- memset(buf, 0, unaligned_len);
+ buf += crypto_shash_descsize(tfm) - 1;
+ *buf = 0;
+ }
- return err ?:
- shash->update(desc, data + unaligned_len, len - unaligned_len);
+ return crypto_shash_alg(tfm)->init(desc);
}
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+int crypto_shash_init(struct shash_desc *desc)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
+ if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return __crypto_shash_init(desc);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_init);
- if ((unsigned long)data & alignmask)
- return shash_update_unaligned(desc, data, len);
+static int shash_default_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
- return shash->update(desc, data, len);
+ return shash->update(desc, data, len) ?:
+ shash->final(desc, out);
}
-EXPORT_SYMBOL_GPL(crypto_shash_update);
-static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
+static int crypto_shash_op_and_zero(
+ int (*op)(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out),
+ struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
- struct crypto_shash *tfm = desc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(tfm);
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned int ds = crypto_shash_digestsize(tfm);
- /*
- * We cannot count on __aligned() working for large values:
- * https://patchwork.kernel.org/patch/9507697/
- */
- u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE];
- u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
- if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
- return -EINVAL;
-
- err = shash->final(desc, buf);
- if (err)
- goto out;
-
- memcpy(out, buf, ds);
-
-out:
- memset(buf, 0, ds);
+ err = op(desc, data, len, out);
+ memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm));
return err;
}
-int crypto_shash_final(struct shash_desc *desc, u8 *out)
+int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data,
+ unsigned int len, u8 *restrict out)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
+ u8 *blenp = shash_desc_ctx(desc);
+ bool finup_max, nonzero;
+ unsigned int bs;
+ int err;
+ u8 *buf;
- if ((unsigned long)out & alignmask)
- return shash_final_unaligned(desc, out);
+ if (!crypto_shash_block_only(tfm)) {
+ if (out)
+ goto finup;
+ return crypto_shash_alg(tfm)->update(desc, data, len);
+ }
- return shash->final(desc, out);
-}
-EXPORT_SYMBOL_GPL(crypto_shash_final);
+ finup_max = out && crypto_shash_finup_max(tfm);
-static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
-}
+ /* Retain extra block for final nonzero algorithms. */
+ nonzero = crypto_shash_final_nonzero(tfm);
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
+ /*
+ * The partial block buffer follows the algorithm desc context.
+ * The byte following that contains the length.
+ */
+ blenp += crypto_shash_descsize(tfm) - 1;
+ bs = crypto_shash_blocksize(tfm);
+ buf = blenp - bs;
+
+ if (likely(!*blenp && finup_max))
+ goto finup;
+
+ while ((*blenp + len) >= bs + nonzero) {
+ unsigned int nbytes = len - nonzero;
+ const u8 *src = data;
+
+ if (*blenp) {
+ memcpy(buf + *blenp, data, bs - *blenp);
+ nbytes = bs;
+ src = buf;
+ }
+
+ err = crypto_shash_alg(tfm)->update(desc, src, nbytes);
+ if (err < 0)
+ return err;
+
+ data += nbytes - err - *blenp;
+ len -= nbytes - err - *blenp;
+ *blenp = 0;
+ }
- if (((unsigned long)data | (unsigned long)out) & alignmask)
- return shash_finup_unaligned(desc, data, len, out);
+ if (*blenp || !out) {
+ memcpy(buf + *blenp, data, len);
+ *blenp += len;
+ if (!out)
+ return 0;
+ data = buf;
+ len = *blenp;
+ }
- return shash->finup(desc, data, len, out);
+finup:
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
-static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int shash_default_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return crypto_shash_init(desc) ?:
+ return __crypto_shash_init(desc) ?:
crypto_shash_finup(desc, data, len, out);
}
@@ -205,16 +184,12 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- if (((unsigned long)data | (unsigned long)out) & alignmask)
- return shash_digest_unaligned(desc, data, len, out);
-
- return shash->digest(desc, data, len, out);
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -222,196 +197,107 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(desc, tfm);
- int err;
desc->tfm = tfm;
-
- err = crypto_shash_digest(desc, data, len, out);
-
- shash_desc_zero(desc);
-
- return err;
+ return crypto_shash_digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
-static int shash_default_export(struct shash_desc *desc, void *out)
+static int __crypto_shash_export(struct shash_desc *desc, void *out,
+ int (*export)(struct shash_desc *desc,
+ void *out))
{
- memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
- return 0;
-}
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *buf = shash_desc_ctx(desc);
+ unsigned int plen, ss;
+
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm))
+ ss -= plen;
+ if (!export) {
+ memcpy(out, buf, ss);
+ return 0;
+ }
-static int shash_default_import(struct shash_desc *desc, const void *in)
-{
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
- return 0;
+ return export(desc, out);
}
-static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+int crypto_shash_export_core(struct shash_desc *desc, void *out)
{
- struct crypto_shash **ctx = crypto_ahash_ctx(tfm);
-
- return crypto_shash_setkey(*ctx, key, keylen);
+ return __crypto_shash_export(desc, out,
+ crypto_shash_alg(desc->tfm)->export_core);
}
+EXPORT_SYMBOL_GPL(crypto_shash_export_core);
-static int shash_async_init(struct ahash_request *req)
+int crypto_shash_export(struct shash_desc *desc, void *out)
{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return crypto_shash_init(desc);
-}
-
-int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
-{
- struct crypto_hash_walk walk;
- int nbytes;
-
- for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
- nbytes = crypto_hash_walk_done(&walk, nbytes))
- nbytes = crypto_shash_update(desc, walk.data, nbytes);
+ struct crypto_shash *tfm = desc->tfm;
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(shash_ahash_update);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
-static int shash_async_update(struct ahash_request *req)
-{
- return shash_ahash_update(req, ahash_request_ctx(req));
-}
-
-static int shash_async_final(struct ahash_request *req)
-{
- return crypto_shash_final(ahash_request_ctx(req), req->result);
+ memcpy(out + ss - plen, buf + descsize - plen, plen);
+ }
+ return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export);
}
+EXPORT_SYMBOL_GPL(crypto_shash_export);
-int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
+static int __crypto_shash_import(struct shash_desc *desc, const void *in,
+ int (*import)(struct shash_desc *desc,
+ const void *in))
{
- struct crypto_hash_walk walk;
- int nbytes;
+ struct crypto_shash *tfm = desc->tfm;
+ unsigned int descsize, plen, ss;
+ u8 *buf = shash_desc_ctx(desc);
- nbytes = crypto_hash_walk_first(req, &walk);
- if (!nbytes)
- return crypto_shash_final(desc, req->result);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
- do {
- nbytes = crypto_hash_walk_last(&walk) ?
- crypto_shash_finup(desc, walk.data, nbytes,
- req->result) :
- crypto_shash_update(desc, walk.data, nbytes);
- nbytes = crypto_hash_walk_done(&walk, nbytes);
- } while (nbytes > 0);
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm)) {
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss -= plen;
+ descsize = crypto_shash_descsize(tfm);
+ buf[descsize - 1] = 0;
+ }
+ if (!import) {
+ memcpy(buf, in, ss);
+ return 0;
+ }
- return nbytes;
+ return import(desc, in);
}
-EXPORT_SYMBOL_GPL(shash_ahash_finup);
-static int shash_async_finup(struct ahash_request *req)
+int crypto_shash_import_core(struct shash_desc *desc, const void *in)
{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return shash_ahash_finup(req, desc);
+ return __crypto_shash_import(desc, in,
+ crypto_shash_alg(desc->tfm)->import_core);
}
+EXPORT_SYMBOL_GPL(crypto_shash_import_core);
-int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
+int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- unsigned int nbytes = req->nbytes;
- struct scatterlist *sg;
- unsigned int offset;
+ struct crypto_shash *tfm = desc->tfm;
int err;
- if (nbytes &&
- (sg = req->src, offset = sg->offset,
- nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
- void *data;
-
- data = kmap_atomic(sg_page(sg));
- err = crypto_shash_digest(desc, data + offset, nbytes,
- req->result);
- kunmap_atomic(data);
- } else
- err = crypto_shash_init(desc) ?:
- shash_ahash_finup(req, desc);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(shash_ahash_digest);
-
-static int shash_async_digest(struct ahash_request *req)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return shash_ahash_digest(req, desc);
-}
-
-static int shash_async_export(struct ahash_request *req, void *out)
-{
- return crypto_shash_export(ahash_request_ctx(req), out);
-}
-
-static int shash_async_import(struct ahash_request *req, const void *in)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
+ err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
- return crypto_shash_import(desc, in);
-}
-
-static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
-{
- struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(*ctx);
-}
-
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
-{
- struct crypto_alg *calg = tfm->__crt_alg;
- struct shash_alg *alg = __crypto_shash_alg(calg);
- struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
- struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *shash;
-
- if (!crypto_mod_get(calg))
- return -EAGAIN;
-
- shash = crypto_create_tfm(calg, &crypto_shash_type);
- if (IS_ERR(shash)) {
- crypto_mod_put(calg);
- return PTR_ERR(shash);
+ memcpy(buf + descsize - plen, in + ss - plen, plen);
+ if (buf[descsize - 1] >= plen)
+ err = -EOVERFLOW;
}
-
- *ctx = shash;
- tfm->exit = crypto_exit_shash_ops_async;
-
- crt->init = shash_async_init;
- crt->update = shash_async_update;
- crt->final = shash_async_final;
- crt->finup = shash_async_finup;
- crt->digest = shash_async_digest;
- if (crypto_shash_alg_has_setkey(alg))
- crt->setkey = shash_async_setkey;
-
- crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
- CRYPTO_TFM_NEED_KEY);
-
- crt->export = shash_async_export;
- crt->import = shash_async_import;
-
- crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
-
- return 0;
+ return err;
}
+EXPORT_SYMBOL_GPL(crypto_shash_import);
static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
@@ -425,9 +311,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
- int err;
-
- hash->descsize = alg->descsize;
shash_set_needkey(hash, alg);
@@ -437,18 +320,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
if (!alg->init_tfm)
return 0;
- err = alg->init_tfm(hash);
- if (err)
- return err;
-
- /* ->init_tfm() may have increased the descsize. */
- if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
- if (alg->exit_tfm)
- alg->exit_tfm(hash);
- return -EINVAL;
- }
-
- return 0;
+ return alg->init_tfm(hash);
}
static void crypto_shash_free_instance(struct crypto_instance *inst)
@@ -458,8 +330,8 @@ static void crypto_shash_free_instance(struct crypto_instance *inst)
shash->free(shash);
}
-#ifdef CONFIG_NET
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_shash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
@@ -473,12 +345,6 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -491,18 +357,21 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
-static const struct crypto_type crypto_shash_type = {
+const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
.free = crypto_shash_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_shash_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
.tfmsize = offsetof(struct crypto_shash, base),
+ .algsize = offsetof(struct shash_alg, base),
};
int crypto_grab_shash(struct crypto_shash_spawn *spawn,
@@ -527,34 +396,124 @@ int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_shash);
-static int shash_prepare_alg(struct shash_alg *alg)
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
+{
+ struct crypto_tfm *tfm = crypto_shash_tfm(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *nhash;
+ int err;
+
+ if (!crypto_shash_alg_has_setkey(alg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init))
+ return ERR_PTR(-ENOSYS);
+
+ nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
+ if (IS_ERR(nhash))
+ return nhash;
+
+ if (alg->clone_tfm) {
+ err = alg->clone_tfm(nhash, hash);
+ if (err) {
+ crypto_free_shash(nhash);
+ return ERR_PTR(err);
+ }
+ }
+
+ if (alg->exit_tfm)
+ crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm;
+
+ return nhash;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_shash);
+
+int hash_prepare_alg(struct hash_alg_common *alg)
{
struct crypto_alg *base = &alg->base;
- if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
- alg->descsize > HASH_MAX_DESCSIZE ||
- alg->statesize > HASH_MAX_STATESIZE)
+ if (alg->digestsize > HASH_MAX_DIGESTSIZE)
+ return -EINVAL;
+
+ /* alignmask is not useful for hashes, so it is not supported. */
+ if (base->cra_alignmask)
return -EINVAL;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ return 0;
+}
+
+static int shash_default_export_core(struct shash_desc *desc, void *out)
+{
+ return -ENOSYS;
+}
+
+static int shash_default_import_core(struct shash_desc *desc, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int shash_prepare_alg(struct shash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+ int err;
+
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_shash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
+ /*
+ * Handle missing optional functions. For each one we can either
+ * install a default here, or we can leave the pointer as NULL and check
+ * the pointer for NULL in crypto_shash_*(), avoiding an indirect call
+ * when the default behavior is desired. For ->finup and ->digest we
+ * install defaults, since for optimal performance algorithms should
+ * implement these anyway. On the other hand, for ->import and
+ * ->export the common case and best performance comes from the simple
+ * memcpy of the shash_desc_ctx, so when those pointers are NULL we
+ * leave them NULL and provide the memcpy with no indirect call.
+ */
if (!alg->finup)
- alg->finup = shash_finup_unaligned;
+ alg->finup = shash_default_finup;
if (!alg->digest)
- alg->digest = shash_digest_unaligned;
- if (!alg->export) {
- alg->export = shash_default_export;
- alg->import = shash_default_import;
- alg->statesize = alg->descsize;
- }
+ alg->digest = shash_default_digest;
+ if (!alg->export && !alg->halg.statesize)
+ alg->halg.statesize = alg->descsize;
if (!alg->setkey)
alg->setkey = shash_no_setkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ alg->descsize += base->cra_blocksize + 1;
+ alg->statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = shash_default_export_core;
+ alg->import_core = shash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
+ if (alg->descsize > HASH_MAX_DESCSIZE)
+ return -EINVAL;
+ if (alg->statesize > HASH_MAX_STATESIZE)
+ return -EINVAL;
+
+ base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize;
+
return 0;
}
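
For orientation, the descriptor layout established by the CRYPTO_AHASH_ALG_BLOCK_ONLY handling above, sketched with D for the algorithm's original descsize and BS for its block size (a reading aid, not part of the diff):

	/*
	 * shash_desc context for a BLOCK_ONLY algorithm, D + BS + 1 bytes:
	 *
	 *	[0 .. D-1]	the algorithm's own state
	 *	[D .. D+BS-1]	buffered partial block (buf)
	 *	[D+BS]		count of buffered bytes (*blenp)
	 */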
diff --git a/crypto/sig.c b/crypto/sig.c
new file mode 100644
index 000000000000..beba745b6405
--- /dev/null
+++ b/crypto/sig.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/internal/sig.h>
+#include <linux/cryptouser.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+
+#include "internal.h"
+
+static void crypto_sig_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_sig *sig = __crypto_sig_tfm(tfm);
+ struct sig_alg *alg = crypto_sig_alg(sig);
+
+ alg->exit(sig);
+}
+
+static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_sig *sig = __crypto_sig_tfm(tfm);
+ struct sig_alg *alg = crypto_sig_alg(sig);
+
+ if (alg->exit)
+ sig->base.exit = crypto_sig_exit_tfm;
+
+ if (alg->init)
+ return alg->init(sig);
+
+ return 0;
+}
+
+static void crypto_sig_free_instance(struct crypto_instance *inst)
+{
+ struct sig_instance *sig = sig_instance(inst);
+
+ sig->free(sig);
+}
+
+static void __maybe_unused crypto_sig_show(struct seq_file *m,
+ struct crypto_alg *alg)
+{
+ seq_puts(m, "type : sig\n");
+}
+
+static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct crypto_report_sig rsig = {};
+
+ strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+ return nla_put(skb, CRYPTOCFGA_REPORT_SIG, sizeof(rsig), &rsig);
+}
+
+static const struct crypto_type crypto_sig_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_sig_init_tfm,
+ .free = crypto_sig_free_instance,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_sig_show,
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
+ .report = crypto_sig_report,
+#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_SIG,
+ .tfmsize = offsetof(struct crypto_sig, base),
+ .algsize = offsetof(struct sig_alg, base),
+};
+
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sig);
+
+static int sig_default_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ return -ENOSYS;
+}
+
+static int sig_default_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *dst, unsigned int dlen)
+{
+ return -ENOSYS;
+}
+
+static int sig_default_set_key(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
+static unsigned int sig_default_size(struct crypto_sig *tfm)
+{
+ return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE);
+}
+
+static int sig_prepare_alg(struct sig_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ if (!alg->sign)
+ alg->sign = sig_default_sign;
+ if (!alg->verify)
+ alg->verify = sig_default_verify;
+ if (!alg->set_priv_key)
+ alg->set_priv_key = sig_default_set_key;
+ if (!alg->set_pub_key)
+ return -EINVAL;
+ if (!alg->key_size)
+ return -EINVAL;
+ if (!alg->max_size)
+ alg->max_size = sig_default_size;
+ if (!alg->digest_size)
+ alg->digest_size = sig_default_size;
+
+ base->cra_type = &crypto_sig_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_SIG;
+
+ return 0;
+}
+
+int crypto_register_sig(struct sig_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = sig_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_sig);
+
+void crypto_unregister_sig(struct sig_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_sig);
+
+int sig_register_instance(struct crypto_template *tmpl,
+ struct sig_instance *inst)
+{
+ int err;
+
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
+ err = sig_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, sig_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(sig_register_instance);
+
+int crypto_grab_sig(struct crypto_sig_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_sig_type;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_sig);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Public Key Signature Algorithms");
diff --git a/crypto/simd.c b/crypto/simd.c
index edaa479a1ec5..b07721d1f3f6 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
return 0;
}
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename)
{
struct simd_skcipher_alg *salg;
- struct crypto_skcipher *tfm;
- struct skcipher_alg *ialg;
struct skcipher_alg *alg;
int err;
- tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
-
- ialg = crypto_skcipher_alg(tfm);
-
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
- goto out_put_tfm;
+ goto out;
}
salg->ialg_name = basename;
@@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
if (err)
goto out_free_salg;
-out_put_tfm:
- crypto_free_skcipher(tfm);
+out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
- goto out_put_tfm;
+ goto out;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename)
-{
- char drvname[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
- CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
-
- return simd_skcipher_create_compat(algname, drvname, basename);
-}
-EXPORT_SYMBOL_GPL(simd_skcipher_create);
-
void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
crypto_unregister_skcipher(&salg->alg);
@@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
@@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm)
return 0;
}
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename)
+static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg,
+ const char *algname,
+ const char *drvname,
+ const char *basename)
{
struct simd_aead_alg *salg;
- struct crypto_aead *tfm;
- struct aead_alg *ialg;
struct aead_alg *alg;
int err;
- tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
-
- ialg = crypto_aead_alg(tfm);
-
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
- goto out_put_tfm;
+ goto out;
}
salg->ialg_name = basename;
@@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
if (err)
goto out_free_salg;
-out_put_tfm:
- crypto_free_aead(tfm);
+out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
- goto out_put_tfm;
-}
-EXPORT_SYMBOL_GPL(simd_aead_create_compat);
-
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename)
-{
- char drvname[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
- CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
-
- return simd_aead_create_compat(algname, drvname, basename);
+ goto out;
}
-EXPORT_SYMBOL_GPL(simd_aead_create);
-void simd_aead_free(struct simd_aead_alg *salg)
+static void simd_aead_free(struct simd_aead_alg *salg)
{
crypto_unregister_aead(&salg->alg);
kfree(salg);
}
-EXPORT_SYMBOL_GPL(simd_aead_free);
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs)
@@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count,
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
- simd = simd_aead_create_compat(algname, drvname, basename);
+ simd = simd_aead_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
@@ -523,4 +477,5 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);
+MODULE_DESCRIPTION("Shared crypto SIMD helpers");
MODULE_LICENSE("GPL");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 418211180cee..14a820cb06c7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -15,147 +15,111 @@
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
-#include <linux/compiler.h>
-#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
+#include "skcipher.h"
-#include "internal.h"
+#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
enum {
- SKCIPHER_WALK_PHYS = 1 << 0,
- SKCIPHER_WALK_SLOW = 1 << 1,
- SKCIPHER_WALK_COPY = 1 << 2,
- SKCIPHER_WALK_DIFF = 1 << 3,
- SKCIPHER_WALK_SLEEP = 1 << 4,
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
};
-struct skcipher_walk_buffer {
- struct list_head entry;
- struct scatter_walk dst;
- unsigned int len;
- u8 *data;
- u8 buffer[];
-};
+static const struct crypto_type crypto_skcipher_type;
static int skcipher_walk_next(struct skcipher_walk *walk);
-static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
-{
- if (PageHighMem(scatterwalk_page(walk)))
- kunmap_atomic(vaddr);
-}
-
-static inline void *skcipher_map(struct scatter_walk *walk)
-{
- struct page *page = scatterwalk_page(walk);
-
- return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
- offset_in_page(walk->offset);
-}
-
-static inline void skcipher_map_src(struct skcipher_walk *walk)
-{
- walk->src.virt.addr = skcipher_map(&walk->in);
-}
-
-static inline void skcipher_map_dst(struct skcipher_walk *walk)
-{
- walk->dst.virt.addr = skcipher_map(&walk->out);
-}
-
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
- skcipher_unmap(&walk->in, walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
- skcipher_unmap(&walk->out, walk->dst.virt.addr);
-}
-
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+static inline struct skcipher_alg *__crypto_skcipher_alg(
+ struct crypto_alg *alg)
{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
- return max(start, end_page);
+ return container_of(alg, struct skcipher_alg, base);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- u8 *addr;
-
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- addr = skcipher_get_spot(addr, bsize);
- scatterwalk_copychunks(addr, &walk->out, bsize,
- (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
- return 0;
-}
-
-int skcipher_walk_done(struct skcipher_walk *walk, int err)
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value, it is returned;
+ *	   errors detected while advancing the walk may be returned as well.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
- unsigned int n = walk->nbytes;
- unsigned int nbytes = 0;
+ unsigned int n = walk->nbytes; /* bytes in this step; reduced below to bytes actually processed */
+ unsigned int total = 0; /* new total remaining */
if (!n)
goto finish;
- if (likely(err >= 0)) {
- n -= err;
- nbytes = walk->total - n;
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
}
- if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
-unmap_src:
- skcipher_unmap_src(walk);
+ scatterwalk_advance(&walk->in, n);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
- skcipher_unmap_dst(walk);
- goto unmap_src;
+ scatterwalk_done_src(&walk->in, n);
} else if (walk->flags & SKCIPHER_WALK_COPY) {
- skcipher_map_dst(walk);
- memcpy(walk->dst.virt.addr, walk->page, n);
- skcipher_unmap_dst(walk);
- } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (err > 0) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
/*
* Didn't process all bytes. Either the algorithm is
* broken, or this was the last step and it turned out
* the message wasn't evenly divisible into blocks but
* the algorithm requires it.
*/
- err = -EINVAL;
- nbytes = 0;
+ res = -EINVAL;
+ total = 0;
} else
- n = skcipher_done_slow(walk, n);
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
}
- if (err > 0)
- err = 0;
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
- walk->total = nbytes;
- walk->nbytes = 0;
+ if (res > 0)
+ res = 0;
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+ walk->total = total;
+ walk->nbytes = 0;
- if (nbytes) {
- crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
- CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
return skcipher_walk_next(walk);
}
@@ -164,9 +128,6 @@ finish:
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
goto out;
- if (walk->flags & SKCIPHER_WALK_PHYS)
- goto out;
-
if (walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
@@ -175,107 +136,33 @@ finish:
free_page((unsigned long)walk->page);
out:
- return err;
+ return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err)
-{
- struct skcipher_walk_buffer *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
- u8 *data;
-
- if (err)
- goto done;
-
- data = p->data;
- if (!data) {
- data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
- data = skcipher_get_spot(data, walk->stride);
- }
-
- scatterwalk_copychunks(data, &p->dst, p->len, 1);
-
- if (offset_in_page(p->data) + p->len + walk->stride >
- PAGE_SIZE)
- free_page((unsigned long)p->data);
-
-done:
- list_del(&p->entry);
- kfree(p);
- }
-
- if (!err && walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_complete);
-
-static void skcipher_queue_write(struct skcipher_walk *walk,
- struct skcipher_walk_buffer *p)
-{
- p->dst = walk->out;
- list_add_tail(&p->entry, &walk->buffers);
-}
-
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
- bool phys = walk->flags & SKCIPHER_WALK_PHYS;
unsigned alignmask = walk->alignmask;
- struct skcipher_walk_buffer *p;
- unsigned a;
unsigned n;
- u8 *buffer;
- void *v;
-
- if (!phys) {
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (buffer)
- goto ok;
- }
-
- /* Start with the minimum alignment of kmalloc. */
- a = crypto_tfm_ctx_alignment() - 1;
- n = bsize;
+ void *buffer;
- if (phys) {
- /* Calculate the minimum alignment of p->buffer. */
- a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
- n += sizeof(*p);
- }
-
- /* Minimum size to align p->buffer by alignmask. */
- n += alignmask & ~a;
-
- /* Minimum size to ensure p->buffer does not straddle a page. */
- n += (bsize - 1) & ~(alignmask | a);
-
- v = kzalloc(n, skcipher_walk_gfp(walk));
- if (!v)
- return skcipher_walk_done(walk, -ENOMEM);
-
- if (phys) {
- p = v;
- p->len = bsize;
- skcipher_queue_write(walk, p);
- buffer = p->buffer;
- } else {
- walk->buffer = v;
- buffer = v;
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
}
-ok:
- walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
- walk->src.virt.addr = walk->dst.virt.addr;
-
- scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
walk->nbytes = bsize;
walk->flags |= SKCIPHER_WALK_SLOW;
@@ -285,33 +172,18 @@ ok:
static int skcipher_next_copy(struct skcipher_walk *walk)
{
- struct skcipher_walk_buffer *p;
- u8 *tmp = walk->page;
-
- skcipher_map_src(walk);
- memcpy(tmp, walk->src.virt.addr, walk->nbytes);
- skcipher_unmap_src(walk);
-
- walk->src.virt.addr = tmp;
- walk->dst.virt.addr = tmp;
-
- if (!(walk->flags & SKCIPHER_WALK_PHYS))
- return 0;
-
- p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
- if (!p)
- return -ENOMEM;
-
- p->data = walk->page;
- p->len = walk->nbytes;
- skcipher_queue_write(walk, p);
+ void *tmp = walk->page;
- if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
- PAGE_SIZE)
- walk->page = NULL;
- else
- walk->page += walk->nbytes;
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
return 0;
}
@@ -319,23 +191,17 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
- walk->src.phys.page = scatterwalk_page(&walk->in);
- walk->src.phys.offset = offset_in_page(walk->in.offset);
- walk->dst.phys.page = scatterwalk_page(&walk->out);
- walk->dst.phys.offset = offset_in_page(walk->out.offset);
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- return 0;
-
- diff = walk->src.phys.offset - walk->dst.phys.offset;
- diff |= walk->src.virt.page - walk->dst.virt.page;
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
- skcipher_map_src(walk);
- walk->dst.virt.addr = walk->src.virt.addr;
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
if (diff) {
walk->flags |= SKCIPHER_WALK_DIFF;
- skcipher_map_dst(walk);
+ scatterwalk_map(&walk->in);
}
return 0;
@@ -345,10 +211,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
{
unsigned int bsize;
unsigned int n;
- int err;
-
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
n = walk->total;
bsize = min(walk->stride, max(n, walk->blocksize));
@@ -360,9 +222,9 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
return skcipher_walk_done(walk, -EINVAL);
slow_path:
- err = skcipher_next_slow(walk, bsize);
- goto set_phys_lowmem;
+ return skcipher_next_slow(walk, bsize);
}
+ walk->nbytes = n;
if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
if (!walk->page) {
@@ -372,58 +234,30 @@ slow_path:
if (!walk->page)
goto slow_path;
}
-
- walk->nbytes = min_t(unsigned, n,
- PAGE_SIZE - offset_in_page(walk->page));
walk->flags |= SKCIPHER_WALK_COPY;
- err = skcipher_next_copy(walk);
- goto set_phys_lowmem;
+ return skcipher_next_copy(walk);
}
- walk->nbytes = n;
-
return skcipher_next_fast(walk);
-
-set_phys_lowmem:
- if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
- walk->src.phys.page = virt_to_page(walk->src.virt.addr);
- walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
- walk->src.phys.offset &= PAGE_SIZE - 1;
- walk->dst.phys.offset &= PAGE_SIZE - 1;
- }
- return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
- unsigned a = crypto_tfm_ctx_alignment() - 1;
unsigned alignmask = walk->alignmask;
unsigned ivsize = walk->ivsize;
- unsigned bs = walk->stride;
- unsigned aligned_bs;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask + 1);
-
- /* Minimum size to align buffer by alignmask. */
- size = alignmask & ~a;
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- size += ivsize;
- else {
- size += aligned_bs + ivsize;
-
- /* Minimum size to ensure buffer does not straddle a page. */
- size += (bs - 1) & ~(alignmask | a);
- }
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
if (!walk->buffer)
return -ENOMEM;
- iv = PTR_ALIGN(walk->buffer, alignmask + 1);
- iv = skcipher_get_spot(iv, bs) + aligned_bs;
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0;
@@ -446,15 +280,24 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
return skcipher_walk_next(walk);
}
-static int skcipher_walk_skcipher(struct skcipher_walk *walk,
- struct skcipher_request *req)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req, bool atomic)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg;
+
+ might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+
+ alg = crypto_skcipher_alg(tfm);
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -462,90 +305,50 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
- walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- SKCIPHER_WALK_SLEEP : 0;
-
walk->blocksize = crypto_skcipher_blocksize(tfm);
- walk->stride = crypto_skcipher_walksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
- return skcipher_walk_first(walk);
-}
-
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
-{
- int err;
-
- might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- err = skcipher_walk_skcipher(walk, req);
-
- walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ walk->stride = alg->co.chunksize;
+ else
+ walk->stride = alg->walksize;
- return err;
+ return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req)
-{
- walk->flags |= SKCIPHER_WALK_PHYS;
-
- INIT_LIST_HEAD(&walk->buffers);
-
- return skcipher_walk_skcipher(walk, req);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_async);
-
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- int err;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- scatterwalk_start(&walk->in, req->src);
- scatterwalk_start(&walk->out, req->dst);
-
- scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
- scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
-
- scatterwalk_done(&walk->in, 0, walk->total);
- scatterwalk_done(&walk->out, 0, walk->total);
-
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- walk->flags |= SKCIPHER_WALK_SLEEP;
- else
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
+ scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
walk->blocksize = crypto_aead_blocksize(tfm);
walk->stride = crypto_aead_chunksize(tfm);
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- err = skcipher_walk_first(walk);
-
- if (atomic)
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
-
- return err;
+ return skcipher_walk_first(walk);
}
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
walk->total = req->cryptlen;
@@ -553,8 +356,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -598,6 +402,17 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
int err;
+ if (cipher->co.base.cra_type != &crypto_skcipher_type) {
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
+ crypto_lskcipher_set_flags(*ctx,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_lskcipher_setkey(*ctx, key, keylen);
+ goto out;
+ }
+
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
return -EINVAL;
@@ -606,6 +421,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
else
err = cipher->setkey(tfm, key, keylen);
+out:
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
@@ -619,37 +435,87 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
- int ret;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_skcipher_alg(tfm)->encrypt(req);
- crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
- return ret;
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_encrypt_sg(req);
+ return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
- int ret;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_skcipher_alg(tfm)->decrypt(req);
- crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
- return ret;
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_decrypt_sg(req);
+ return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
+static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ u8 *ivs = skcipher_request_ctx(req);
+
+ ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
+
+ memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
+ crypto_skcipher_statesize(tfm));
+
+ return 0;
+}
+
+static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ u8 *ivs = skcipher_request_ctx(req);
+
+ ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
+
+ memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
+ crypto_skcipher_statesize(tfm));
+
+ return 0;
+}
+
+static int skcipher_noexport(struct skcipher_request *req, void *out)
+{
+ return 0;
+}
+
+static int skcipher_noimport(struct skcipher_request *req, const void *in)
+{
+ return 0;
+}
+
+int crypto_skcipher_export(struct skcipher_request *req, void *out)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_export(req, out);
+ return alg->export(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_export);
+
+int crypto_skcipher_import(struct skcipher_request *req, const void *in)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_import(req, in);
+ return alg->import(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_import);
+
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -665,6 +531,20 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher_set_needkey(skcipher);
+ if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
+ unsigned am = crypto_skcipher_alignmask(skcipher);
+ unsigned reqsize;
+
+ reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
+ reqsize += crypto_skcipher_ivsize(skcipher);
+ reqsize += crypto_skcipher_statesize(skcipher);
+ crypto_skcipher_set_reqsize(skcipher, reqsize);
+
+ return crypto_init_lskcipher_ops_sg(tfm);
+ }
+
+ crypto_skcipher_set_reqsize(skcipher, crypto_tfm_alg_reqsize(tfm));
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
@@ -674,6 +554,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+ if (alg->cra_type != &crypto_skcipher_type)
+ return sizeof(struct crypto_lskcipher *);
+
+ return crypto_alg_extsize(alg);
+}
+
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
struct skcipher_instance *skcipher =
@@ -686,26 +574,25 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
- alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
seq_printf(m, "walksize : %u\n", skcipher->walksize);
+ seq_printf(m, "statesize : %u\n", skcipher->statesize);
}
-#ifdef CONFIG_NET
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_skcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
memset(&rblkcipher, 0, sizeof(rblkcipher));
@@ -720,25 +607,22 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(rblkcipher), &rblkcipher);
}
-#else
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static const struct crypto_type crypto_skcipher_type = {
- .extsize = crypto_alg_extsize,
+ .extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
+ .algsize = offsetof(struct skcipher_alg, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
@@ -763,7 +647,8 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
struct crypto_skcipher *tfm;
/* Only sync algorithms allowed. */
- mask |= CRYPTO_ALG_ASYNC;
+ mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
+ type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
@@ -787,21 +672,45 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
-static int skcipher_prepare_alg(struct skcipher_alg *alg)
+int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
- alg->walksize > PAGE_SIZE / 8)
+ alg->statesize > PAGE_SIZE / 2 ||
+ (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
return -EINVAL;
if (!alg->chunksize)
alg->chunksize = base->cra_blocksize;
+
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ return 0;
+}
+
+static int skcipher_prepare_alg(struct skcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = skcipher_prepare_alg_common(&alg->co);
+ if (err)
+ return err;
+
+ if (alg->walksize > PAGE_SIZE / 8)
+ return -EINVAL;
+
if (!alg->walksize)
alg->walksize = alg->chunksize;
+ if (!alg->statesize) {
+ alg->import = skcipher_noimport;
+ alg->export = skcipher_noexport;
+ } else if (!(alg->import && alg->export))
+ return -EINVAL;
+
base->cra_type = &crypto_skcipher_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
return 0;
@@ -981,4 +890,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
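The kernel-doc added above for skcipher_walk_done() is easiest to read next to a
consumer. Below is a minimal sketch, not taken from this patch: the "toy" names
and the XOR transform are illustrative assumptions, but the calling pattern
(skcipher_walk_virt(), the walk.nbytes loop, walk.{src,dst}.virt.addr, and
reporting unprocessed bytes to skcipher_walk_done()) follows the semantics the
new documentation describes.

#include <crypto/internal/skcipher.h>

/* Illustrative walk consumer; "toy" is hypothetical, the pattern mirrors
 * in-tree mode drivers. */
static int toy_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	/* atomic=false: sleeping is allowed if the request permits it */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int i;

		for (i = 0; i < walk.nbytes; i++)
			dst[i] = src[i] ^ 0x5a;	/* placeholder transform */

		/* 0 == no bytes of this step were left unprocessed */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}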
diff --git a/crypto/skcipher.h b/crypto/skcipher.h
new file mode 100644
index 000000000000..703651367dd8
--- /dev/null
+++ b/crypto/skcipher.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_SKCIPHER_H
+#define _LOCAL_CRYPTO_SKCIPHER_H
+
+#include <crypto/internal/skcipher.h>
+#include "internal.h"
+
+int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
+int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
+int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
+int skcipher_prepare_alg_common(struct skcipher_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_SKCIPHER_H */
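The crypto_skcipher_export()/crypto_skcipher_import() pair introduced in
skcipher.c above lets a caller checkpoint per-request chaining state sized by
crypto_skcipher_statesize(). A minimal sketch, assuming two already-prepared
requests on the same tfm; the function name and abbreviated error handling are
illustrative, not part of this patch:

#include <crypto/skcipher.h>
#include <linux/slab.h>

/* Sketch: encrypt a message in two requests, carrying the chaining
 * state across via the new export/import helpers. */
static int two_step_encrypt(struct crypto_skcipher *tfm,
			    struct skcipher_request *req1,
			    struct skcipher_request *req2)
{
	void *state;
	int err;

	state = kmalloc(crypto_skcipher_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_skcipher_encrypt(req1);
	if (!err)
		err = crypto_skcipher_export(req1, state);	/* save */
	if (!err)
		err = crypto_skcipher_import(req2, state);	/* restore */
	if (!err)
		err = crypto_skcipher_encrypt(req2);

	kfree_sensitive(state);
	return err;
}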
diff --git a/crypto/sm2.c b/crypto/sm2.c
deleted file mode 100644
index ed9307dac3d1..000000000000
--- a/crypto/sm2.c
+++ /dev/null
@@ -1,460 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SM2 asymmetric public-key algorithm
- * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
- * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
- *
- * Copyright (c) 2020, Alibaba Group.
- * Authors: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#include <linux/module.h>
-#include <linux/mpi.h>
-#include <crypto/internal/akcipher.h>
-#include <crypto/akcipher.h>
-#include <crypto/hash.h>
-#include <crypto/sm3.h>
-#include <crypto/rng.h>
-#include <crypto/sm2.h>
-#include "sm2signature.asn1.h"
-
-#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8)
-
-struct ecc_domain_parms {
- const char *desc; /* Description of the curve. */
- unsigned int nbits; /* Number of bits. */
- unsigned int fips:1; /* True if this is a FIPS140-2 approved curve */
-
- /* The model describing this curve. This is mainly used to select
- * the group equation.
- */
- enum gcry_mpi_ec_models model;
-
- /* The actual ECC dialect used. This is used for curve specific
- * optimizations and to select encodings etc.
- */
- enum ecc_dialects dialect;
-
- const char *p; /* The prime defining the field. */
- const char *a, *b; /* The coefficients. For Twisted Edwards
- * Curves b is used for d. For Montgomery
- * Curves (a,b) has ((A-2)/4,B^-1).
- */
- const char *n; /* The order of the base point. */
- const char *g_x, *g_y; /* Base point. */
- unsigned int h; /* Cofactor. */
-};
-
-static const struct ecc_domain_parms sm2_ecp = {
- .desc = "sm2p256v1",
- .nbits = 256,
- .fips = 0,
- .model = MPI_EC_WEIERSTRASS,
- .dialect = ECC_DIALECT_STANDARD,
- .p = "0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff",
- .a = "0xfffffffeffffffffffffffffffffffffffffffff00000000fffffffffffffffc",
- .b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93",
- .n = "0xfffffffeffffffffffffffffffffffff7203df6b21c6052b53bbf40939d54123",
- .g_x = "0x32c4ae2c1f1981195f9904466a39c9948fe30bbff2660be1715a4589334c74c7",
- .g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0",
- .h = 1
-};
-
-static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
-{
- const struct ecc_domain_parms *ecp = &sm2_ecp;
- MPI p, a, b;
- MPI x, y;
- int rc = -EINVAL;
-
- p = mpi_scanval(ecp->p);
- a = mpi_scanval(ecp->a);
- b = mpi_scanval(ecp->b);
- if (!p || !a || !b)
- goto free_p;
-
- x = mpi_scanval(ecp->g_x);
- y = mpi_scanval(ecp->g_y);
- if (!x || !y)
- goto free;
-
- rc = -ENOMEM;
-
- ec->Q = mpi_point_new(0);
- if (!ec->Q)
- goto free;
-
- /* mpi_ec_setup_elliptic_curve */
- ec->G = mpi_point_new(0);
- if (!ec->G) {
- mpi_point_release(ec->Q);
- goto free;
- }
-
- mpi_set(ec->G->x, x);
- mpi_set(ec->G->y, y);
- mpi_set_ui(ec->G->z, 1);
-
- rc = -EINVAL;
- ec->n = mpi_scanval(ecp->n);
- if (!ec->n) {
- mpi_point_release(ec->Q);
- mpi_point_release(ec->G);
- goto free;
- }
-
- ec->h = ecp->h;
- ec->name = ecp->desc;
- mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b);
-
- rc = 0;
-
-free:
- mpi_free(x);
- mpi_free(y);
-free_p:
- mpi_free(p);
- mpi_free(a);
- mpi_free(b);
-
- return rc;
-}
-
-static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
-{
- mpi_ec_deinit(ec);
-
- memset(ec, 0, sizeof(*ec));
-}
-
-/* RESULT must have been initialized and is set on success to the
- * point given by VALUE.
- */
-static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
-{
- int rc;
- size_t n;
- unsigned char *buf;
- MPI x, y;
-
- n = MPI_NBYTES(value);
- buf = kmalloc(n, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value);
- if (rc)
- goto err_freebuf;
-
- rc = -EINVAL;
- if (n < 1 || ((n - 1) % 2))
- goto err_freebuf;
- /* No support for point compression */
- if (*buf != 0x4)
- goto err_freebuf;
-
- rc = -ENOMEM;
- n = (n - 1) / 2;
- x = mpi_read_raw_data(buf + 1, n);
- if (!x)
- goto err_freebuf;
- y = mpi_read_raw_data(buf + 1 + n, n);
- if (!y)
- goto err_freex;
-
- mpi_normalize(x);
- mpi_normalize(y);
- mpi_set(result->x, x);
- mpi_set(result->y, y);
- mpi_set_ui(result->z, 1);
-
- rc = 0;
-
- mpi_free(y);
-err_freex:
- mpi_free(x);
-err_freebuf:
- kfree(buf);
- return rc;
-}
-
-struct sm2_signature_ctx {
- MPI sig_r;
- MPI sig_s;
-};
-
-int sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
-{
- struct sm2_signature_ctx *sig = context;
-
- if (!value || !vlen)
- return -EINVAL;
-
- sig->sig_r = mpi_read_raw_data(value, vlen);
- if (!sig->sig_r)
- return -ENOMEM;
-
- return 0;
-}
-
-int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
-{
- struct sm2_signature_ctx *sig = context;
-
- if (!value || !vlen)
- return -EINVAL;
-
- sig->sig_s = mpi_read_raw_data(value, vlen);
- if (!sig->sig_s)
- return -ENOMEM;
-
- return 0;
-}
-
-static int sm2_z_digest_update(struct sm3_state *sctx,
- MPI m, unsigned int pbytes)
-{
- static const unsigned char zero[32];
- unsigned char *in;
- unsigned int inlen;
-
- in = mpi_get_buffer(m, &inlen, NULL);
- if (!in)
- return -EINVAL;
-
- if (inlen < pbytes) {
- /* padding with zero */
- sm3_update(sctx, zero, pbytes - inlen);
- sm3_update(sctx, in, inlen);
- } else if (inlen > pbytes) {
- /* skip the starting zero */
- sm3_update(sctx, in + inlen - pbytes, pbytes);
- } else {
- sm3_update(sctx, in, inlen);
- }
-
- kfree(in);
- return 0;
-}
-
-static int sm2_z_digest_update_point(struct sm3_state *sctx,
- MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes)
-{
- MPI x, y;
- int ret = -EINVAL;
-
- x = mpi_new(0);
- y = mpi_new(0);
-
- if (!mpi_ec_get_affine(x, y, point, ec) &&
- !sm2_z_digest_update(sctx, x, pbytes) &&
- !sm2_z_digest_update(sctx, y, pbytes))
- ret = 0;
-
- mpi_free(x);
- mpi_free(y);
- return ret;
-}
-
-int sm2_compute_z_digest(struct crypto_akcipher *tfm,
- const unsigned char *id, size_t id_len,
- unsigned char dgst[SM3_DIGEST_SIZE])
-{
- struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
- uint16_t bits_len;
- unsigned char entl[2];
- struct sm3_state sctx;
- unsigned int pbytes;
-
- if (id_len > (USHRT_MAX / 8) || !ec->Q)
- return -EINVAL;
-
- bits_len = (uint16_t)(id_len * 8);
- entl[0] = bits_len >> 8;
- entl[1] = bits_len & 0xff;
-
- pbytes = MPI_NBYTES(ec->p);
-
- /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
- sm3_init(&sctx);
- sm3_update(&sctx, entl, 2);
- sm3_update(&sctx, id, id_len);
-
- if (sm2_z_digest_update(&sctx, ec->a, pbytes) ||
- sm2_z_digest_update(&sctx, ec->b, pbytes) ||
- sm2_z_digest_update_point(&sctx, ec->G, ec, pbytes) ||
- sm2_z_digest_update_point(&sctx, ec->Q, ec, pbytes))
- return -EINVAL;
-
- sm3_final(&sctx, dgst);
- return 0;
-}
-EXPORT_SYMBOL(sm2_compute_z_digest);
-
-static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s)
-{
- int rc = -EINVAL;
- struct gcry_mpi_point sG, tP;
- MPI t = NULL;
- MPI x1 = NULL, y1 = NULL;
-
- mpi_point_init(&sG);
- mpi_point_init(&tP);
- x1 = mpi_new(0);
- y1 = mpi_new(0);
- t = mpi_new(0);
-
- /* r, s in [1, n-1] */
- if (mpi_cmp_ui(sig_r, 1) < 0 || mpi_cmp(sig_r, ec->n) > 0 ||
- mpi_cmp_ui(sig_s, 1) < 0 || mpi_cmp(sig_s, ec->n) > 0) {
- goto leave;
- }
-
- /* t = (r + s) % n, t == 0 */
- mpi_addm(t, sig_r, sig_s, ec->n);
- if (mpi_cmp_ui(t, 0) == 0)
- goto leave;
-
- /* sG + tP = (x1, y1) */
- rc = -EBADMSG;
- mpi_ec_mul_point(&sG, sig_s, ec->G, ec);
- mpi_ec_mul_point(&tP, t, ec->Q, ec);
- mpi_ec_add_points(&sG, &sG, &tP, ec);
- if (mpi_ec_get_affine(x1, y1, &sG, ec))
- goto leave;
-
- /* R = (e + x1) % n */
- mpi_addm(t, hash, x1, ec->n);
-
- /* check R == r */
- rc = -EKEYREJECTED;
- if (mpi_cmp(t, sig_r))
- goto leave;
-
- rc = 0;
-
-leave:
- mpi_point_free_parts(&sG);
- mpi_point_free_parts(&tP);
- mpi_free(x1);
- mpi_free(y1);
- mpi_free(t);
-
- return rc;
-}
-
-static int sm2_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
- unsigned char *buffer;
- struct sm2_signature_ctx sig;
- MPI hash;
- int ret;
-
- if (unlikely(!ec->Q))
- return -EINVAL;
-
- buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- sg_pcopy_to_buffer(req->src,
- sg_nents_for_len(req->src, req->src_len + req->dst_len),
- buffer, req->src_len + req->dst_len, 0);
-
- sig.sig_r = NULL;
- sig.sig_s = NULL;
- ret = asn1_ber_decoder(&sm2signature_decoder, &sig,
- buffer, req->src_len);
- if (ret)
- goto error;
-
- ret = -ENOMEM;
- hash = mpi_read_raw_data(buffer + req->src_len, req->dst_len);
- if (!hash)
- goto error;
-
- ret = _sm2_verify(ec, hash, sig.sig_r, sig.sig_s);
-
- mpi_free(hash);
-error:
- mpi_free(sig.sig_r);
- mpi_free(sig.sig_s);
- kfree(buffer);
- return ret;
-}
-
-static int sm2_set_pub_key(struct crypto_akcipher *tfm,
- const void *key, unsigned int keylen)
-{
- struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
- MPI a;
- int rc;
-
- /* include the uncompressed flag '0x04' */
- a = mpi_read_raw_data(key, keylen);
- if (!a)
- return -ENOMEM;
-
- mpi_normalize(a);
- rc = sm2_ecc_os2ec(ec->Q, a);
- mpi_free(a);
-
- return rc;
-}
-
-static unsigned int sm2_max_size(struct crypto_akcipher *tfm)
-{
- /* Unlimited max size */
- return PAGE_SIZE;
-}
-
-static int sm2_init_tfm(struct crypto_akcipher *tfm)
-{
- struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
-
- return sm2_ec_ctx_init(ec);
-}
-
-static void sm2_exit_tfm(struct crypto_akcipher *tfm)
-{
- struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
-
- sm2_ec_ctx_deinit(ec);
-}
-
-static struct akcipher_alg sm2 = {
- .verify = sm2_verify,
- .set_pub_key = sm2_set_pub_key,
- .max_size = sm2_max_size,
- .init = sm2_init_tfm,
- .exit = sm2_exit_tfm,
- .base = {
- .cra_name = "sm2",
- .cra_driver_name = "sm2-generic",
- .cra_priority = 100,
- .cra_module = THIS_MODULE,
- .cra_ctxsize = sizeof(struct mpi_ec_ctx),
- },
-};
-
-static int __init sm2_init(void)
-{
- return crypto_register_akcipher(&sm2);
-}
-
-static void __exit sm2_exit(void)
-{
- crypto_unregister_akcipher(&sm2);
-}
-
-subsys_initcall(sm2_init);
-module_exit(sm2_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
-MODULE_DESCRIPTION("SM2 generic algorithm");
-MODULE_ALIAS_CRYPTO("sm2-generic");
diff --git a/crypto/sm2signature.asn1 b/crypto/sm2signature.asn1
deleted file mode 100644
index ab8c0b754d21..000000000000
--- a/crypto/sm2signature.asn1
+++ /dev/null
@@ -1,4 +0,0 @@
-Sm2Signature ::= SEQUENCE {
- sig_r INTEGER ({ sm2_get_signature_r }),
- sig_s INTEGER ({ sm2_get_signature_s })
-}
diff --git a/crypto/sm3.c b/crypto/sm3.c
deleted file mode 100644
index d473e358a873..000000000000
--- a/crypto/sm3.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
- * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
- *
- * Copyright (C) 2017 ARM Limited or its affiliates.
- * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
- * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#include <linux/module.h>
-#include <asm/unaligned.h>
-#include <crypto/sm3.h>
-
-static const u32 ____cacheline_aligned K[64] = {
- 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
- 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
- 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
- 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
- 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
- 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
- 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
- 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
-};
-
-/*
- * Transform the message X which consists of 16 32-bit-words. See
- * GM/T 004-2012 for details.
- */
-#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
- do { \
- ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
- ss2 = ss1 ^ rol32((a), 12); \
- d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
- h += GG ## i(e, f, g) + ss1 + (w1); \
- b = rol32((b), 9); \
- f = rol32((f), 19); \
- h = P0((h)); \
- } while (0)
-
-#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(1, a, b, c, d, e, f, g, h, t, w1, w2)
-#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(2, a, b, c, d, e, f, g, h, t, w1, w2)
-
-#define FF1(x, y, z) (x ^ y ^ z)
-#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
-
-#define GG1(x, y, z) FF1(x, y, z)
-#define GG2(x, y, z) ((x & y) | (~x & z))
-
-/* Message expansion */
-#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
-#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
-#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
-#define W1(i) (W[i & 0x0f])
-#define W2(i) (W[i & 0x0f] = \
- P1(W[i & 0x0f] \
- ^ W[(i-9) & 0x0f] \
- ^ rol32(W[(i-3) & 0x0f], 15)) \
- ^ rol32(W[(i-13) & 0x0f], 7) \
- ^ W[(i-6) & 0x0f])
-
-static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
-{
- u32 a, b, c, d, e, f, g, h, ss1, ss2;
-
- a = sctx->state[0];
- b = sctx->state[1];
- c = sctx->state[2];
- d = sctx->state[3];
- e = sctx->state[4];
- f = sctx->state[5];
- g = sctx->state[6];
- h = sctx->state[7];
-
- R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
- R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
- R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
- R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
- R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
- R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
- R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
- R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
- R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
- R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
- R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
- R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
- R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
- R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
- R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
- R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
-
- R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
- R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
- R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
- R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
- R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
- R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
- R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
- R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
- R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
- R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
- R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
- R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
- R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
- R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
- R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
- R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
-
- R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
- R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
- R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
- R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
- R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
- R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
- R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
- R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
- R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
- R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
- R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
- R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
- R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
- R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
- R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
- R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
-
- R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
- R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
- R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
- R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
- R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
- R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
- R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
- R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
- R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
- R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
- R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
- R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
- R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
- R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
- R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
- R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
-
- sctx->state[0] ^= a;
- sctx->state[1] ^= b;
- sctx->state[2] ^= c;
- sctx->state[3] ^= d;
- sctx->state[4] ^= e;
- sctx->state[5] ^= f;
- sctx->state[6] ^= g;
- sctx->state[7] ^= h;
-}
-#undef R
-#undef R1
-#undef R2
-#undef I
-#undef W1
-#undef W2
-
-static inline void sm3_block(struct sm3_state *sctx,
- u8 const *data, int blocks, u32 W[16])
-{
- while (blocks--) {
- sm3_transform(sctx, data, W);
- data += SM3_BLOCK_SIZE;
- }
-}
-
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
-{
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
-
- sctx->count += len;
-
- if ((partial + len) >= SM3_BLOCK_SIZE) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- sm3_block(sctx, data, blocks, W);
- data += blocks * SM3_BLOCK_SIZE;
- }
-
- memzero_explicit(W, sizeof(W));
-
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-}
-EXPORT_SYMBOL_GPL(sm3_update);
-
-void sm3_final(struct sm3_state *sctx, u8 *out)
-{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- __be32 *digest = (__be32 *)out;
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
- int i;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
- partial = 0;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- memset(sctx->buffer + partial, 0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- sm3_block(sctx, sctx->buffer, 1, W);
-
- for (i = 0; i < 8; i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- /* Zeroize sensitive information. */
- memzero_explicit(W, sizeof(W));
- memzero_explicit(sctx, sizeof(*sctx));
-}
-EXPORT_SYMBOL_GPL(sm3_final);
-
-MODULE_DESCRIPTION("Generic SM3 library");
-MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index a215c1c37e73..7529139fcc96 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -9,15 +9,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
@@ -30,38 +25,28 @@ EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- sm3_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-
-static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
-{
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
+ return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic);
}
static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
- sm3_final(sctx, hash);
- return 0;
+ sm3_base_do_finup(desc, data, len, sm3_block_generic);
+ return sm3_base_finish(desc, hash);
}
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
- .final = crypto_sm3_final,
.finup = crypto_sm3_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -77,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void)
crypto_unregister_shash(&sm3_alg);
}
-subsys_initcall(sm3_generic_mod_init);
+module_init(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4.c b/crypto/sm4.c
index 2c44193bc27e..f4cd7edc11f0 100644
--- a/crypto/sm4.c
+++ b/crypto/sm4.c
@@ -8,7 +8,7 @@
*/
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/sm4.h>
static const u32 ____cacheline_aligned fk[4] = {
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index 4a6480a27fee..d57444e8428c 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -7,14 +7,14 @@
* All rights reserved.
*/
+#include <crypto/algapi.h>
#include <crypto/sm4.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* sm4_setkey - Set the SM4 key.
@@ -83,7 +83,7 @@ static void __exit sm4_fini(void)
crypto_unregister_alg(&sm4_alg);
}
-subsys_initcall(sm4_init);
+module_init(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index dc625ffc54ad..57bbf70f4c22 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -13,9 +13,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
#include <crypto/streebog.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
static const struct streebog_uint512 buffer0 = { {
0, 0, 0, 0, 0, 0, 0, 0
@@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc)
return 0;
}
-static void streebog_pad(struct streebog_state *ctx)
-{
- if (ctx->fillsize >= STREEBOG_BLOCK_SIZE)
- return;
-
- memset(ctx->buffer + ctx->fillsize, 0,
- sizeof(ctx->buffer) - ctx->fillsize);
-
- ctx->buffer[ctx->fillsize] = 1;
-}
-
static void streebog_add512(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *r)
@@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
-static void streebog_stage3(struct streebog_state *ctx)
+static void streebog_stage3(struct streebog_state *ctx, const u8 *src,
+ unsigned int len)
{
struct streebog_uint512 buf = { { 0 } };
+ union {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 m;
+ } u = {};
- buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
- streebog_pad(ctx);
+ buf.qword[0] = cpu_to_le64(len << 3);
+ memcpy(u.buffer, src, len);
+ u.buffer[len] = 1;
- streebog_g(&ctx->h, &ctx->N, &ctx->m);
+ streebog_g(&ctx->h, &ctx->N, &u.m);
streebog_add512(&ctx->N, &buf, &ctx->N);
- streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma);
+ memzero_explicit(&u, sizeof(u));
streebog_g(&ctx->h, &buffer0, &ctx->N);
streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
@@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- size_t chunksize;
- if (ctx->fillsize) {
- chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize;
- if (chunksize > len)
- chunksize = len;
- memcpy(&ctx->buffer[ctx->fillsize], data, chunksize);
- ctx->fillsize += chunksize;
- len -= chunksize;
- data += chunksize;
-
- if (ctx->fillsize == STREEBOG_BLOCK_SIZE) {
- streebog_stage2(ctx, ctx->buffer);
- ctx->fillsize = 0;
- }
- }
-
- while (len >= STREEBOG_BLOCK_SIZE) {
+ do {
streebog_stage2(ctx, data);
data += STREEBOG_BLOCK_SIZE;
len -= STREEBOG_BLOCK_SIZE;
- }
+ } while (len >= STREEBOG_BLOCK_SIZE);
- if (len) {
- memcpy(&ctx->buffer, data, len);
- ctx->fillsize = len;
- }
- return 0;
+ return len;
}
-static int streebog_final(struct shash_desc *desc, u8 *digest)
+static int streebog_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *digest)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- streebog_stage3(ctx);
- ctx->fillsize = 0;
+ streebog_stage3(ctx, src, len);
if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE)
memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE);
else
@@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG256_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog256",
.cra_driver_name = "streebog256-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
@@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG512_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog512",
.cra_driver_name = "streebog512-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(streebog_mod_init);
+module_init(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a82679b576bb..62fef100e599 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -25,18 +25,21 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
-#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/moduleparam.h>
-#include <linux/jiffies.h>
#include <linux/timex.h>
-#include <linux/interrupt.h>
+
+#include "internal.h"
#include "tcrypt.h"
/*
- * Need slab memory for testing (size in number of pages).
+ * Need slab memory for benchmarking (size in number of pages).
*/
#define TVMEMSIZE 4
@@ -324,7 +327,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
crypto_req_done, &data[i].wait);
}
- pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ pr_info("testing speed of multibuffer %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
i = 0;
@@ -506,8 +509,8 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
out:
if (ret == 0)
- printk("1 operation in %lu cycles (%d bytes)\n",
- (cycles + 4) / 8, blen);
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / 8, blen);
return ret;
}
@@ -575,8 +578,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
crypto_init_wait(&wait);
- printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
- get_driver_name(crypto_aead, tfm), e);
+ pr_info("testing speed of %s (%s) %s\n", algo,
+ get_driver_name(crypto_aead, tfm), e);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -624,8 +627,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
memset(iv, 0xff, iv_len);
crypto_aead_clear_flags(tfm, ~0);
- printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
- i, *keysize * 8, bs);
+ pr_info("test %u (%d bit key, %d byte blocks): ",
+ i, *keysize * 8, bs);
memset(tvmem[0], 0xff, PAGE_SIZE);
@@ -727,8 +730,8 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
return ret;
}
- printk("%6u opers/sec, %9lu bytes/sec\n",
- bcount / secs, ((long)bcount * blen) / secs);
+ pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / secs, ((long)bcount * blen) / secs);
return 0;
}
@@ -877,8 +880,8 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
return;
}
- printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
- get_driver_name(crypto_ahash, tfm));
+ pr_info("testing speed of async %s (%s)\n", algo,
+ get_driver_name(crypto_ahash, tfm));
if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
@@ -1090,15 +1093,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
goto out_free_tfm;
}
-
- for (i = 0; i < num_mb; ++i)
- if (testmgr_alloc_buf(data[i].xbuf)) {
- while (i--)
- testmgr_free_buf(data[i].xbuf);
- goto out_free_tfm;
- }
-
-
for (i = 0; i < num_mb; ++i) {
data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
@@ -1117,7 +1111,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
crypto_init_wait(&data[i].wait);
}
- pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ pr_info("testing speed of multibuffer %s (%s) %s\n", algo,
get_driver_name(crypto_skcipher, tfm), e);
i = 0;
@@ -1324,13 +1318,12 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
return;
}
- pr_info("\ntesting speed of %s %s (%s) %s\n", async ? "async" : "sync",
+ pr_info("testing speed of %s %s (%s) %s\n", async ? "async" : "sync",
algo, get_driver_name(crypto_skcipher, tfm), e);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
- pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
- algo);
+ pr_err("skcipher: Failed to allocate request for %s\n", algo);
goto out;
}
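In the hunk below, result aggregation switches from ret += ... to
ret = min(ret, ...). Each tcrypt_test() returns 0 or a -errno, and summing
error codes can alias unrelated errors (two -ENOENT results, -2 + -2, sum to
-4, which reads as -EINTR), whereas min() preserves a single valid, most
negative -errno. A tiny illustrative sketch of the difference:

#include <linux/errno.h>
#include <linux/minmax.h>

static int aggregate_demo(void)
{
	int ret = 0;

	ret = min(ret, -ENOENT);	/* first failure is recorded */
	ret = min(ret, 0);		/* a later success does not clobber it */
	return ret;			/* -ENOENT, still a valid errno */
}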
@@ -1471,387 +1464,379 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
}
for (i = 1; i < 200; i++)
- ret += do_test(NULL, 0, 0, i, num_mb);
+ ret = min(ret, do_test(NULL, 0, 0, i, num_mb));
break;
case 1:
- ret += tcrypt_test("md5");
+ ret = min(ret, tcrypt_test("md5"));
break;
case 2:
- ret += tcrypt_test("sha1");
+ ret = min(ret, tcrypt_test("sha1"));
break;
case 3:
- ret += tcrypt_test("ecb(des)");
- ret += tcrypt_test("cbc(des)");
- ret += tcrypt_test("ctr(des)");
+ ret = min(ret, tcrypt_test("ecb(des)"));
+ ret = min(ret, tcrypt_test("cbc(des)"));
+ ret = min(ret, tcrypt_test("ctr(des)"));
break;
case 4:
- ret += tcrypt_test("ecb(des3_ede)");
- ret += tcrypt_test("cbc(des3_ede)");
- ret += tcrypt_test("ctr(des3_ede)");
+ ret = min(ret, tcrypt_test("ecb(des3_ede)"));
+ ret = min(ret, tcrypt_test("cbc(des3_ede)"));
+ ret = min(ret, tcrypt_test("ctr(des3_ede)"));
break;
case 5:
- ret += tcrypt_test("md4");
+ ret = min(ret, tcrypt_test("md4"));
break;
case 6:
- ret += tcrypt_test("sha256");
+ ret = min(ret, tcrypt_test("sha256"));
break;
case 7:
- ret += tcrypt_test("ecb(blowfish)");
- ret += tcrypt_test("cbc(blowfish)");
- ret += tcrypt_test("ctr(blowfish)");
+ ret = min(ret, tcrypt_test("ecb(blowfish)"));
+ ret = min(ret, tcrypt_test("cbc(blowfish)"));
+ ret = min(ret, tcrypt_test("ctr(blowfish)"));
break;
case 8:
- ret += tcrypt_test("ecb(twofish)");
- ret += tcrypt_test("cbc(twofish)");
- ret += tcrypt_test("ctr(twofish)");
- ret += tcrypt_test("lrw(twofish)");
- ret += tcrypt_test("xts(twofish)");
+ ret = min(ret, tcrypt_test("ecb(twofish)"));
+ ret = min(ret, tcrypt_test("cbc(twofish)"));
+ ret = min(ret, tcrypt_test("ctr(twofish)"));
+ ret = min(ret, tcrypt_test("lrw(twofish)"));
+ ret = min(ret, tcrypt_test("xts(twofish)"));
break;
case 9:
- ret += tcrypt_test("ecb(serpent)");
- ret += tcrypt_test("cbc(serpent)");
- ret += tcrypt_test("ctr(serpent)");
- ret += tcrypt_test("lrw(serpent)");
- ret += tcrypt_test("xts(serpent)");
+ ret = min(ret, tcrypt_test("ecb(serpent)"));
+ ret = min(ret, tcrypt_test("cbc(serpent)"));
+ ret = min(ret, tcrypt_test("ctr(serpent)"));
+ ret = min(ret, tcrypt_test("lrw(serpent)"));
+ ret = min(ret, tcrypt_test("xts(serpent)"));
break;
case 10:
- ret += tcrypt_test("ecb(aes)");
- ret += tcrypt_test("cbc(aes)");
- ret += tcrypt_test("lrw(aes)");
- ret += tcrypt_test("xts(aes)");
- ret += tcrypt_test("ctr(aes)");
- ret += tcrypt_test("rfc3686(ctr(aes))");
- ret += tcrypt_test("ofb(aes)");
- ret += tcrypt_test("cfb(aes)");
- ret += tcrypt_test("xctr(aes)");
+ ret = min(ret, tcrypt_test("ecb(aes)"));
+ ret = min(ret, tcrypt_test("cbc(aes)"));
+ ret = min(ret, tcrypt_test("lrw(aes)"));
+ ret = min(ret, tcrypt_test("xts(aes)"));
+ ret = min(ret, tcrypt_test("ctr(aes)"));
+ ret = min(ret, tcrypt_test("rfc3686(ctr(aes))"));
+ ret = min(ret, tcrypt_test("xctr(aes)"));
break;
case 11:
- ret += tcrypt_test("sha384");
+ ret = min(ret, tcrypt_test("sha384"));
break;
case 12:
- ret += tcrypt_test("sha512");
+ ret = min(ret, tcrypt_test("sha512"));
break;
case 13:
- ret += tcrypt_test("deflate");
+ ret = min(ret, tcrypt_test("deflate"));
break;
case 14:
- ret += tcrypt_test("ecb(cast5)");
- ret += tcrypt_test("cbc(cast5)");
- ret += tcrypt_test("ctr(cast5)");
+ ret = min(ret, tcrypt_test("ecb(cast5)"));
+ ret = min(ret, tcrypt_test("cbc(cast5)"));
+ ret = min(ret, tcrypt_test("ctr(cast5)"));
break;
case 15:
- ret += tcrypt_test("ecb(cast6)");
- ret += tcrypt_test("cbc(cast6)");
- ret += tcrypt_test("ctr(cast6)");
- ret += tcrypt_test("lrw(cast6)");
- ret += tcrypt_test("xts(cast6)");
+ ret = min(ret, tcrypt_test("ecb(cast6)"));
+ ret = min(ret, tcrypt_test("cbc(cast6)"));
+ ret = min(ret, tcrypt_test("ctr(cast6)"));
+ ret = min(ret, tcrypt_test("lrw(cast6)"));
+ ret = min(ret, tcrypt_test("xts(cast6)"));
break;
case 16:
- ret += tcrypt_test("ecb(arc4)");
+ ret = min(ret, tcrypt_test("ecb(arc4)"));
break;
case 17:
- ret += tcrypt_test("michael_mic");
+ ret = min(ret, tcrypt_test("michael_mic"));
break;
case 18:
- ret += tcrypt_test("crc32c");
+ ret = min(ret, tcrypt_test("crc32c"));
break;
case 19:
- ret += tcrypt_test("ecb(tea)");
+ ret = min(ret, tcrypt_test("ecb(tea)"));
break;
case 20:
- ret += tcrypt_test("ecb(xtea)");
+ ret = min(ret, tcrypt_test("ecb(xtea)"));
break;
case 21:
- ret += tcrypt_test("ecb(khazad)");
+ ret = min(ret, tcrypt_test("ecb(khazad)"));
break;
case 22:
- ret += tcrypt_test("wp512");
+ ret = min(ret, tcrypt_test("wp512"));
break;
case 23:
- ret += tcrypt_test("wp384");
+ ret = min(ret, tcrypt_test("wp384"));
break;
case 24:
- ret += tcrypt_test("wp256");
+ ret = min(ret, tcrypt_test("wp256"));
break;
case 26:
- ret += tcrypt_test("ecb(anubis)");
- ret += tcrypt_test("cbc(anubis)");
+ ret = min(ret, tcrypt_test("ecb(anubis)"));
+ ret = min(ret, tcrypt_test("cbc(anubis)"));
break;
case 30:
- ret += tcrypt_test("ecb(xeta)");
+ ret = min(ret, tcrypt_test("ecb(xeta)"));
break;
case 31:
- ret += tcrypt_test("pcbc(fcrypt)");
+ ret = min(ret, tcrypt_test("pcbc(fcrypt)"));
break;
case 32:
- ret += tcrypt_test("ecb(camellia)");
- ret += tcrypt_test("cbc(camellia)");
- ret += tcrypt_test("ctr(camellia)");
- ret += tcrypt_test("lrw(camellia)");
- ret += tcrypt_test("xts(camellia)");
+ ret = min(ret, tcrypt_test("ecb(camellia)"));
+ ret = min(ret, tcrypt_test("cbc(camellia)"));
+ ret = min(ret, tcrypt_test("ctr(camellia)"));
+ ret = min(ret, tcrypt_test("lrw(camellia)"));
+ ret = min(ret, tcrypt_test("xts(camellia)"));
break;
case 33:
- ret += tcrypt_test("sha224");
+ ret = min(ret, tcrypt_test("sha224"));
break;
case 35:
- ret += tcrypt_test("gcm(aes)");
+ ret = min(ret, tcrypt_test("gcm(aes)"));
break;
case 36:
- ret += tcrypt_test("lzo");
+ ret = min(ret, tcrypt_test("lzo"));
break;
case 37:
- ret += tcrypt_test("ccm(aes)");
+ ret = min(ret, tcrypt_test("ccm(aes)"));
break;
case 38:
- ret += tcrypt_test("cts(cbc(aes))");
+ ret = min(ret, tcrypt_test("cts(cbc(aes))"));
break;
case 39:
- ret += tcrypt_test("xxhash64");
+ ret = min(ret, tcrypt_test("xxhash64"));
break;
case 40:
- ret += tcrypt_test("rmd160");
+ ret = min(ret, tcrypt_test("rmd160"));
break;
case 42:
- ret += tcrypt_test("blake2b-512");
+ ret = min(ret, tcrypt_test("blake2b-512"));
break;
case 43:
- ret += tcrypt_test("ecb(seed)");
+ ret = min(ret, tcrypt_test("ecb(seed)"));
break;
case 45:
- ret += tcrypt_test("rfc4309(ccm(aes))");
+ ret = min(ret, tcrypt_test("rfc4309(ccm(aes))"));
break;
case 46:
- ret += tcrypt_test("ghash");
- break;
-
- case 47:
- ret += tcrypt_test("crct10dif");
+ ret = min(ret, tcrypt_test("ghash"));
break;
case 48:
- ret += tcrypt_test("sha3-224");
+ ret = min(ret, tcrypt_test("sha3-224"));
break;
case 49:
- ret += tcrypt_test("sha3-256");
+ ret = min(ret, tcrypt_test("sha3-256"));
break;
case 50:
- ret += tcrypt_test("sha3-384");
+ ret = min(ret, tcrypt_test("sha3-384"));
break;
case 51:
- ret += tcrypt_test("sha3-512");
+ ret = min(ret, tcrypt_test("sha3-512"));
break;
case 52:
- ret += tcrypt_test("sm3");
+ ret = min(ret, tcrypt_test("sm3"));
break;
case 53:
- ret += tcrypt_test("streebog256");
+ ret = min(ret, tcrypt_test("streebog256"));
break;
case 54:
- ret += tcrypt_test("streebog512");
+ ret = min(ret, tcrypt_test("streebog512"));
break;
case 55:
- ret += tcrypt_test("gcm(sm4)");
+ ret = min(ret, tcrypt_test("gcm(sm4)"));
break;
case 56:
- ret += tcrypt_test("ccm(sm4)");
+ ret = min(ret, tcrypt_test("ccm(sm4)"));
break;
- case 57:
- ret += tcrypt_test("polyval");
+ case 58:
+ ret = min(ret, tcrypt_test("gcm(aria)"));
break;
- case 58:
- ret += tcrypt_test("gcm(aria)");
+ case 59:
+ ret = min(ret, tcrypt_test("cts(cbc(sm4))"));
break;
case 100:
- ret += tcrypt_test("hmac(md5)");
+ ret = min(ret, tcrypt_test("hmac(md5)"));
break;
case 101:
- ret += tcrypt_test("hmac(sha1)");
+ ret = min(ret, tcrypt_test("hmac(sha1)"));
break;
case 102:
- ret += tcrypt_test("hmac(sha256)");
+ ret = min(ret, tcrypt_test("hmac(sha256)"));
break;
case 103:
- ret += tcrypt_test("hmac(sha384)");
+ ret = min(ret, tcrypt_test("hmac(sha384)"));
break;
case 104:
- ret += tcrypt_test("hmac(sha512)");
+ ret = min(ret, tcrypt_test("hmac(sha512)"));
break;
case 105:
- ret += tcrypt_test("hmac(sha224)");
+ ret = min(ret, tcrypt_test("hmac(sha224)"));
break;
case 106:
- ret += tcrypt_test("xcbc(aes)");
+ ret = min(ret, tcrypt_test("xcbc(aes)"));
break;
case 108:
- ret += tcrypt_test("hmac(rmd160)");
- break;
-
- case 109:
- ret += tcrypt_test("vmac64(aes)");
+ ret = min(ret, tcrypt_test("hmac(rmd160)"));
break;
case 111:
- ret += tcrypt_test("hmac(sha3-224)");
+ ret = min(ret, tcrypt_test("hmac(sha3-224)"));
break;
case 112:
- ret += tcrypt_test("hmac(sha3-256)");
+ ret = min(ret, tcrypt_test("hmac(sha3-256)"));
break;
case 113:
- ret += tcrypt_test("hmac(sha3-384)");
+ ret = min(ret, tcrypt_test("hmac(sha3-384)"));
break;
case 114:
- ret += tcrypt_test("hmac(sha3-512)");
+ ret = min(ret, tcrypt_test("hmac(sha3-512)"));
break;
case 115:
- ret += tcrypt_test("hmac(streebog256)");
+ ret = min(ret, tcrypt_test("hmac(streebog256)"));
break;
case 116:
- ret += tcrypt_test("hmac(streebog512)");
- break;
-
- case 150:
- ret += tcrypt_test("ansi_cprng");
+ ret = min(ret, tcrypt_test("hmac(streebog512)"));
break;
case 151:
- ret += tcrypt_test("rfc4106(gcm(aes))");
+ ret = min(ret, tcrypt_test("rfc4106(gcm(aes))"));
break;
case 152:
- ret += tcrypt_test("rfc4543(gcm(aes))");
+ ret = min(ret, tcrypt_test("rfc4543(gcm(aes))"));
break;
case 153:
- ret += tcrypt_test("cmac(aes)");
+ ret = min(ret, tcrypt_test("cmac(aes)"));
break;
case 154:
- ret += tcrypt_test("cmac(des3_ede)");
+ ret = min(ret, tcrypt_test("cmac(des3_ede)"));
break;
case 155:
- ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(aes))"));
break;
case 156:
- ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
+ ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"));
break;
case 157:
- ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"));
break;
case 158:
- ret += tcrypt_test("cbcmac(sm4)");
+ ret = min(ret, tcrypt_test("cbcmac(sm4)"));
break;
case 159:
- ret += tcrypt_test("cmac(sm4)");
+ ret = min(ret, tcrypt_test("cmac(sm4)"));
+ break;
+
+ case 160:
+ ret = min(ret, tcrypt_test("xcbc(sm4)"));
break;
case 181:
- ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))"));
break;
case 182:
- ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"));
break;
case 183:
- ret += tcrypt_test("authenc(hmac(sha224),cbc(des))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))"));
break;
case 184:
- ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"));
break;
case 185:
- ret += tcrypt_test("authenc(hmac(sha256),cbc(des))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))"));
break;
case 186:
- ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"));
break;
case 187:
- ret += tcrypt_test("authenc(hmac(sha384),cbc(des))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))"));
break;
case 188:
- ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"));
break;
case 189:
- ret += tcrypt_test("authenc(hmac(sha512),cbc(des))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))"));
break;
case 190:
- ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
+ ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"));
break;
case 191:
- ret += tcrypt_test("ecb(sm4)");
- ret += tcrypt_test("cbc(sm4)");
- ret += tcrypt_test("cfb(sm4)");
- ret += tcrypt_test("ctr(sm4)");
+ ret = min(ret, tcrypt_test("ecb(sm4)"));
+ ret = min(ret, tcrypt_test("cbc(sm4)"));
+ ret = min(ret, tcrypt_test("ctr(sm4)"));
+ ret = min(ret, tcrypt_test("xts(sm4)"));
break;
case 192:
- ret += tcrypt_test("ecb(aria)");
- ret += tcrypt_test("cbc(aria)");
- ret += tcrypt_test("cfb(aria)");
- ret += tcrypt_test("ctr(aria)");
+ ret = min(ret, tcrypt_test("ecb(aria)"));
+ ret = min(ret, tcrypt_test("cbc(aria)"));
+ ret = min(ret, tcrypt_test("ctr(aria)"));
+ break;
+ case 193:
+ ret = min(ret, tcrypt_test("ffdhe2048(dh)"));
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -1878,10 +1863,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
- test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32);
- test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
- speed_template_16_24_32);
break;
case 201:
@@ -2045,11 +2026,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
case 211:
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
- NULL, 0, 16, 16, aead_speed_template_20);
+ NULL, 0, 16, 16, aead_speed_template_20_28_36);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
- NULL, 0, 16, 16, aead_speed_template_20);
+ NULL, 0, 16, 16, aead_speed_template_20_28_36);
test_aead_speed("gcm(aes)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;
@@ -2075,11 +2056,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
case 215:
test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
- 0, 16, 16, aead_speed_template_20, num_mb);
+ 0, 16, 16, aead_speed_template_20_28_36, num_mb);
test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
- 0, 16, 16, aead_speed_template_20, num_mb);
+ 0, 16, 16, aead_speed_template_20_28_36, num_mb);
test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
break;
@@ -2109,14 +2090,18 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16);
test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
- test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+ test_cipher_speed("cts(cbc(sm4))", ENCRYPT, sec, NULL, 0,
speed_template_16);
- test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+ test_cipher_speed("cts(cbc(sm4))", DECRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
+ test_cipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_32);
+ test_cipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_32);
break;
case 219:
@@ -2188,10 +2173,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16_24_32);
test_cipher_speed("cbc(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
- test_cipher_speed("cfb(aria)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32);
- test_cipher_speed("cfb(aria)", DECRYPT, sec, NULL, 0,
- speed_template_16_24_32);
test_cipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
@@ -2279,14 +2260,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 320:
- test_hash_speed("crct10dif", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
- case 321:
- test_hash_speed("poly1305", sec, poly1305_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
@@ -2426,14 +2399,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
- test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32);
- test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
- speed_template_16_24_32);
- test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32);
- test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
- speed_template_16_24_32);
test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0,
speed_template_20_28_36);
test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0,
@@ -2453,18 +2418,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
- test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24);
- test_acipher_speed("cfb(des3_ede)", DECRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24);
- test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24);
- test_acipher_speed("ofb(des3_ede)", DECRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24);
break;
case 502:
@@ -2476,14 +2429,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_8);
test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
- test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
- speed_template_8);
- test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
- speed_template_8);
- test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
- speed_template_8);
- test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
- speed_template_8);
break;
case 503:
@@ -2622,14 +2567,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16);
test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
- test_acipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
- speed_template_16);
- test_acipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
- speed_template_16);
test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
+ test_acipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_32);
+ test_acipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_32);
break;
case 519:
@@ -2644,6 +2589,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 600:
+ if (alg) {
+ u8 speed_template[2] = {klen, 0};
+ test_mb_skcipher_speed(alg, ENCRYPT, sec, NULL, 0,
+ speed_template, num_mb);
+ test_mb_skcipher_speed(alg, DECRYPT, sec, NULL, 0,
+ speed_template, num_mb);
+ break;
+ }
+
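The new branch lets mode 600 benchmark a caller-chosen skcipher with a caller-chosen key length instead of only the hard-coded list that follows. A hypothetical invocation (the algorithm name is just an example; `alg`, `klen`, `num_mb` and `mode` are existing tcrypt module parameters):

	modprobe tcrypt mode=600 alg="cbc(aes)" klen=16 num_mb=8

By design tcrypt does not stay loaded after its run, so modprobe reporting an error afterwards is expected.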
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
@@ -2668,14 +2622,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
- test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32, num_mb);
- test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
- speed_template_16_24_32, num_mb);
- test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32, num_mb);
- test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
- speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
0, speed_template_20_28_36, num_mb);
test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
@@ -2695,18 +2641,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
- test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24, num_mb);
- test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24, num_mb);
- test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24, num_mb);
- test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
- des3_speed_template, DES3_SPEED_VECTORS,
- speed_template_24, num_mb);
break;
case 602:
@@ -2718,14 +2652,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_8, num_mb);
test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
- test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
- speed_template_8, num_mb);
- test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
- speed_template_8, num_mb);
- test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
- speed_template_8, num_mb);
- test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
- speed_template_8, num_mb);
break;
case 603:
@@ -2885,7 +2811,7 @@ static int __init tcrypt_mod_init(void)
err = do_test(alg, type, mask, mode, num_mb);
if (err) {
- printk(KERN_ERR "tcrypt: one or more tests failed!\n");
+ pr_err("one or more tests failed!\n");
goto err_free_tv;
} else {
pr_debug("all tests passed\n");
@@ -2930,5 +2856,5 @@ module_param(klen, uint, 0);
MODULE_PARM_DESC(klen, "Key length (defaults to 0)");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Quick & dirty crypto testing module");
+MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 9f654677172a..85c3f77bcfb4 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -62,7 +62,7 @@ static u8 speed_template_32[] = {32, 0};
* AEAD speed tests
*/
static u8 aead_speed_template_19[] = {19, 0};
-static u8 aead_speed_template_20[] = {20, 0};
+static u8 aead_speed_template_20_28_36[] = {20, 28, 36, 0};
static u8 aead_speed_template_36[] = {36, 0};
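These speed templates are zero-terminated arrays of key lengths in bytes, and the benchmark code iterates entries until it reaches 0. Widening aead_speed_template_20 to {20, 28, 36, 0} makes the rfc4106(gcm(aes)) cases above exercise all three AES key sizes: an RFC 4106 key is the AES key plus a 4-byte nonce salt, hence 16+4, 24+4 and 32+4. A sketch of how such a template is consumed, assuming this convention:

	/* Sketch: run fn() once per key length in a 0-terminated template. */
	static void for_each_keysize(const u8 *tmpl, void (*fn)(unsigned int klen))
	{
		while (*tmpl)
			fn(*tmpl++);
	}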
/*
@@ -96,22 +96,4 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
-static struct hash_speed poly1305_speed_template[] = {
- { .blen = 96, .plen = 16, },
- { .blen = 96, .plen = 32, },
- { .blen = 96, .plen = 96, },
- { .blen = 288, .plen = 16, },
- { .blen = 288, .plen = 32, },
- { .blen = 288, .plen = 288, },
- { .blen = 1056, .plen = 32, },
- { .blen = 1056, .plen = 1056, },
- { .blen = 2080, .plen = 32, },
- { .blen = 2080, .plen = 2080, },
- { .blen = 4128, .plen = 4128, },
- { .blen = 8224, .plen = 8224, },
-
- /* End marker */
- { .blen = 0, .plen = 0, }
-};
-
#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/tea.c b/crypto/tea.c
index 02efc5d81690..cb05140e3470 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -14,11 +14,11 @@
* Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
*/
+#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
-#include <linux/crypto.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
@@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
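The conversions in this file replace pointer casts through __le32 * (which are only valid when the buffer is suitably aligned, hence the old .cra_alignmask) with the get_unaligned_le32()/put_unaligned_le32() helpers from <linux/unaligned.h>, which work at any address. A minimal sketch of the access pattern now used throughout:

	#include <linux/types.h>
	#include <linux/unaligned.h>

	/* Sketch: round-trip two LE32 words at an arbitrary address. */
	static void copy_block(u8 *dst, const u8 *src)
	{
		u32 y = get_unaligned_le32(&src[0]);
		u32 z = get_unaligned_le32(&src[4]);

		put_unaligned_le32(y, &dst[0]);
		put_unaligned_le32(z, &dst[4]);
	}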
@@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
sum -= TEA_DELTA;
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
@@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static struct crypto_alg tea_algs[3] = { {
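With every load and store now going through the unaligned helpers, the cipher no longer needs the crypto core to align its buffers for it, which is what allows the three .cra_alignmask = 3 entries in the hunks below to be deleted: an alignmask of 3 told the API to only pass pointers with the low two bits clear (bouncing through aligned buffers when necessary), a cost that is now avoided.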
@@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
@@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
@@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
@@ -272,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
-subsys_initcall(tea_mod_init);
+module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index bcd059caa1c8..a302be53896d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -23,7 +23,7 @@
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/once.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -33,31 +33,32 @@
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
+#include <crypto/sig.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include "internal.h"
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
static bool notests;
module_param(notests, bool, 0644);
-MODULE_PARM_DESC(notests, "disable crypto self-tests");
+MODULE_PARM_DESC(notests, "disable all crypto self-tests");
-static bool panic_on_fail;
-module_param(panic_on_fail, bool, 0444);
-
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-static bool noextratests;
-module_param(noextratests, bool, 0644);
-MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
+static bool noslowtests;
+module_param(noslowtests, bool, 0644);
+MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+#else
+#define noslowtests 1
+#define fuzz_iterations 0
#endif
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifndef CONFIG_CRYPTO_SELFTESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -116,11 +117,6 @@ struct hash_test_suite {
unsigned int count;
};
-struct cprng_test_suite {
- const struct cprng_testvec *vecs;
- unsigned int count;
-};
-
struct drbg_test_suite {
const struct drbg_testvec *vecs;
unsigned int count;
@@ -131,6 +127,11 @@ struct akcipher_test_suite {
unsigned int count;
};
+struct sig_test_suite {
+ const struct sig_testvec *vecs;
+ unsigned int count;
+};
+
struct kpp_test_suite {
const struct kpp_testvec *vecs;
unsigned int count;
@@ -148,9 +149,9 @@ struct alg_test_desc {
struct cipher_test_suite cipher;
struct comp_test_suite comp;
struct hash_test_suite hash;
- struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
struct akcipher_test_suite akcipher;
+ struct sig_test_suite sig;
struct kpp_test_suite kpp;
} suite;
};
@@ -293,6 +294,10 @@ struct test_sg_division {
* the @key_offset
* @finalization_type: what finalization function to use for hashes
* @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
+ * This applies to the parts of the operation that aren't controlled
+ * individually by @nosimd_setkey or @src_divs[].nosimd.
+ * @nosimd_setkey: set the key (if applicable) with SIMD disabled? Requires
+ * !CRYPTO_TFM_REQ_MAY_SLEEP.
*/
struct testvec_config {
const char *name;
@@ -306,16 +311,16 @@ struct testvec_config {
bool key_offset_relative_to_alignmask;
enum finalization_type finalization_type;
bool nosimd;
+ bool nosimd_setkey;
};
#define TESTVEC_CONFIG_NAMELEN 192
/*
* The following are the lists of testvec_configs to test for each algorithm
- * type when the basic crypto self-tests are enabled, i.e. when
- * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test
- * coverage, while keeping the test time much shorter than the full fuzz tests
- * so that the basic tests can be enabled in a wider range of circumstances.
+ * type when the "fast" crypto self-tests are enabled. They aim to provide good
+ * test coverage, while keeping the test time much shorter than the "full" tests
+ * so that the "fast" tests can be enabled in a wider range of circumstances.
*/
/* Configs for skciphers and aeads */
@@ -357,6 +362,14 @@ static const struct testvec_config default_cipher_testvec_configs[] = {
{ .proportion_of_total = 5000 },
},
}, {
+ .name = "one src, two even splits dst",
+ .inplace_mode = OUT_OF_PLACE,
+ .src_divs = { { .proportion_of_total = 10000 } },
+ .dst_divs = {
+ { .proportion_of_total = 5000 },
+ { .proportion_of_total = 5000 },
+ },
+ }, {
.name = "uneven misaligned splits, may sleep",
.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
.src_divs = {
@@ -400,17 +413,15 @@ static const struct testvec_config default_hash_testvec_configs[] = {
.finalization_type = FINALIZATION_TYPE_FINAL,
.key_offset = 1,
}, {
- .name = "digest buffer aligned only to alignmask",
+ .name = "digest misaligned buffer",
.src_divs = {
{
.proportion_of_total = 10000,
.offset = 1,
- .offset_relative_to_alignmask = true,
},
},
.finalization_type = FINALIZATION_TYPE_DIGEST,
.key_offset = 1,
- .key_offset_relative_to_alignmask = true,
}, {
.name = "init+update+update+final two even splits",
.src_divs = {
@@ -527,7 +538,8 @@ static bool valid_testvec_config(const struct testvec_config *cfg)
cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
return false;
- if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
+ if ((cfg->nosimd || cfg->nosimd_setkey ||
+ (flags & SGDIVS_HAVE_NOSIMD)) &&
(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
return false;
@@ -766,7 +778,7 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
struct iov_iter input;
int err;
- iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
+ iov_iter_kvec(&input, ITER_SOURCE, inputs, nr_inputs, src_total_len);
err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
cfg->inplace_mode != OUT_OF_PLACE ?
max(dst_total_len, src_total_len) :
@@ -835,7 +847,10 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
return 0;
}
-/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */
+/*
+ * Like setkey_f(tfm, key, ksize), but sometimes misalign the key.
+ * In addition, run the setkey function in no-SIMD context if requested.
+ */
#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \
({ \
const u8 *keybuf, *keyptr; \
@@ -844,69 +859,116 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \
&keybuf, &keyptr); \
if (err == 0) { \
+ if ((cfg)->nosimd_setkey) \
+ crypto_disable_simd_for_test(); \
err = setkey_f((tfm), keyptr, (ksize)); \
+ if ((cfg)->nosimd_setkey) \
+ crypto_reenable_simd_for_test(); \
kfree(keybuf); \
} \
err; \
})
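do_setkey() stays a statement-expression macro so one definition works with every per-type setkey function (shash, ahash, skcipher, aead). The new nosimd_setkey handling wraps only the setkey call in a no-SIMD window, so a driver's fallback key-scheduling path can be exercised independently of its en/decrypt path. Sketched, the pattern the macro expands to for one such call:

	/* Sketch of the expanded pattern when cfg->nosimd_setkey is set. */
	crypto_disable_simd_for_test();
	err = crypto_skcipher_setkey(tfm, key, keylen);
	crypto_reenable_simd_for_test();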
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * The fuzz tests use prandom instead of the normal Linux RNG since they don't
+ * need cryptographically secure random numbers. This greatly improves the
+ * performance of these tests, especially if they are run before the Linux RNG
+ * has been initialized or if they are run on a lockdep-enabled kernel.
+ */
+
+static inline void init_rnd_state(struct rnd_state *rng)
+{
+ prandom_seed_state(rng, get_random_u64());
+}
+
+static inline u8 prandom_u8(struct rnd_state *rng)
+{
+ return prandom_u32_state(rng);
+}
+
+static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
+{
+ /*
+ * This is slightly biased for non-power-of-2 values of 'ceil', but this
+ * isn't important here.
+ */
+ return prandom_u32_state(rng) % ceil;
+}
+
+static inline bool prandom_bool(struct rnd_state *rng)
+{
+ return prandom_u32_below(rng, 2);
+}
+
+static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
+ u32 floor, u32 ceil)
+{
+ return floor + prandom_u32_below(rng, ceil - floor + 1);
+}
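These wrappers replace the cryptographically secure get_random_*() calls with a prandom stream seeded once from get_random_u64(), so runs still differ while the per-call cost drops sharply. On the bias the comment mentions: for ceil = 3, the 2^32 generator outputs map to residue 0 exactly 1431655766 times but to residues 1 and 2 only 1431655765 times each, a skew that is irrelevant for fuzzing. If it ever mattered, the standard fix is rejection sampling, sketched here (not used by the patch):

	/* Sketch: bias-free variant of prandom_u32_below(); ceil must be
	 * nonzero.  Rejecting the low (2^32 % ceil) raw values leaves a
	 * range that ceil divides evenly. */
	static u32 unbiased_below(struct rnd_state *rng, u32 ceil)
	{
		u32 limit = -ceil % ceil;	/* == 2^32 % ceil */
		u32 v;

		do {
			v = prandom_u32_state(rng);
		} while (v < limit);
		return v % ceil;
	}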
/* Generate a random length in range [0, max_len], but prefer smaller values */
-static unsigned int generate_random_length(unsigned int max_len)
+static unsigned int generate_random_length(struct rnd_state *rng,
+ unsigned int max_len)
{
- unsigned int len = prandom_u32_max(max_len + 1);
+ unsigned int len = prandom_u32_below(rng, max_len + 1);
- switch (prandom_u32_max(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
- return len % 64;
+ len %= 64;
+ break;
case 1:
- return len % 256;
+ len %= 256;
+ break;
case 2:
- return len % 1024;
+ len %= 1024;
+ break;
default:
- return len;
+ break;
}
+ if (len && prandom_u32_below(rng, 4) == 0)
+ len = rounddown_pow_of_two(len);
+ return len;
}
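Besides preferring small lengths, the generator now occasionally rounds the result down to a power of two (e.g. rounddown_pow_of_two(100) == 64), which makes the fuzzer hit block-size and stride-aligned fast paths that arbitrary lengths would rarely land on.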
/* Flip a random bit in the given nonempty data buffer */
-static void flip_random_bit(u8 *buf, size_t size)
+static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t bitpos;
- bitpos = prandom_u32_max(size * 8);
+ bitpos = prandom_u32_below(rng, size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
-static void flip_random_byte(u8 *buf, size_t size)
+static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
{
- buf[prandom_u32_max(size)] ^= 0xff;
+ buf[prandom_u32_below(rng, size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
-static void mutate_buffer(u8 *buf, size_t size)
+static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t num_flips;
size_t i;
/* Sometimes flip some bits */
- if (prandom_u32_max(4) == 0) {
- num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
+ if (prandom_u32_below(rng, 4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
+ size * 8);
for (i = 0; i < num_flips; i++)
- flip_random_bit(buf, size);
+ flip_random_bit(rng, buf, size);
}
/* Sometimes flip some bytes */
- if (prandom_u32_max(4) == 0) {
- num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
+ if (prandom_u32_below(rng, 4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
for (i = 0; i < num_flips; i++)
- flip_random_byte(buf, size);
+ flip_random_byte(rng, buf, size);
}
}
/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
-static void generate_random_bytes(u8 *buf, size_t count)
+static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
{
u8 b;
u8 increment;
@@ -915,11 +977,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
if (count == 0)
return;
- switch (prandom_u32_max(8)) { /* Choose a generation strategy */
+ switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
- switch (prandom_u32_max(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
b = 0x00;
break;
@@ -927,28 +989,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
b = 0xff;
break;
default:
- b = get_random_u8();
+ b = prandom_u8(rng);
break;
}
memset(buf, b, count);
- mutate_buffer(buf, count);
+ mutate_buffer(rng, buf, count);
break;
case 2:
/* Ascending or descending bytes, plus optional mutations */
- increment = get_random_u8();
- b = get_random_u8();
+ increment = prandom_u8(rng);
+ b = prandom_u8(rng);
for (i = 0; i < count; i++, b += increment)
buf[i] = b;
- mutate_buffer(buf, count);
+ mutate_buffer(rng, buf, count);
break;
default:
/* Fully random bytes */
- for (i = 0; i < count; i++)
- buf[i] = get_random_u8();
+ prandom_bytes_state(rng, buf, count);
}
}
-static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+static char *generate_random_sgl_divisions(struct rnd_state *rng,
+ struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
bool gen_flushes, u32 req_flags)
{
@@ -959,24 +1021,28 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
unsigned int this_len;
const char *flushtype_str;
- if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
+ if (div == &divs[max_divs - 1] || prandom_bool(rng))
this_len = remaining;
+ else if (prandom_u32_below(rng, 4) == 0)
+ this_len = (remaining + 1) / 2;
else
- this_len = 1 + prandom_u32_max(remaining);
+ this_len = prandom_u32_inclusive(rng, 1, remaining);
div->proportion_of_total = this_len;
- if (prandom_u32_max(4) == 0)
- div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
- else if (prandom_u32_max(2) == 0)
- div->offset = prandom_u32_max(32);
+ if (prandom_u32_below(rng, 4) == 0)
+ div->offset = prandom_u32_inclusive(rng,
+ PAGE_SIZE - 128,
+ PAGE_SIZE - 1);
+ else if (prandom_bool(rng))
+ div->offset = prandom_u32_below(rng, 32);
else
- div->offset = prandom_u32_max(PAGE_SIZE);
- if (prandom_u32_max(8) == 0)
+ div->offset = prandom_u32_below(rng, PAGE_SIZE);
+ if (prandom_u32_below(rng, 8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
- switch (prandom_u32_max(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
@@ -988,7 +1054,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- prandom_u32_max(2) == 0)
+ prandom_bool(rng))
div->nosimd = true;
switch (div->flush_type) {
@@ -1023,7 +1089,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
}
/* Generate a random testvec_config for fuzz testing */
-static void generate_random_testvec_config(struct testvec_config *cfg,
+static void generate_random_testvec_config(struct rnd_state *rng,
+ struct testvec_config *cfg,
char *name, size_t max_namelen)
{
char *p = name;
@@ -1035,7 +1102,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:");
- switch (prandom_u32_max(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
@@ -1050,12 +1117,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (prandom_u32_max(2) == 0) {
+ if (prandom_bool(rng)) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
- switch (prandom_u32_max(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
@@ -1070,36 +1137,43 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- prandom_u32_max(2) == 0) {
- cfg->nosimd = true;
- p += scnprintf(p, end - p, " nosimd");
+ if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) {
+ if (prandom_bool(rng)) {
+ cfg->nosimd = true;
+ p += scnprintf(p, end - p, " nosimd");
+ }
+ if (prandom_bool(rng)) {
+ cfg->nosimd_setkey = true;
+ p += scnprintf(p, end - p, " nosimd_setkey");
+ }
}
p += scnprintf(p, end - p, " src_divs=[");
- p = generate_random_sgl_divisions(cfg->src_divs,
+ p = generate_random_sgl_divisions(rng, cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
FINALIZATION_TYPE_DIGEST),
cfg->req_flags);
p += scnprintf(p, end - p, "]");
- if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
+ if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
p += scnprintf(p, end - p, " dst_divs=[");
- p = generate_random_sgl_divisions(cfg->dst_divs,
+ p = generate_random_sgl_divisions(rng, cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
p, end, false,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
- if (prandom_u32_max(2) == 0) {
- cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
+ if (prandom_bool(rng)) {
+ cfg->iv_offset = prandom_u32_inclusive(rng, 1,
+ MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
- if (prandom_u32_max(2) == 0) {
- cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
+ if (prandom_bool(rng)) {
+ cfg->key_offset = prandom_u32_inclusive(rng, 1,
+ MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
@@ -1108,14 +1182,18 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
static void crypto_disable_simd_for_test(void)
{
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
migrate_disable();
__this_cpu_write(crypto_simd_disabled_for_test, true);
+#endif
}
static void crypto_reenable_simd_for_test(void)
{
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
__this_cpu_write(crypto_simd_disabled_for_test, false);
migrate_enable();
+#endif
}
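Making these helpers unconditional (with the bodies compiled out when CONFIG_CRYPTO_SELFTESTS_FULL is off) is what lets the old !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS stubs further down be removed. The per-CPU flag is flipped between migrate_disable()/migrate_enable() so the setting and the crypto operation stay on one CPU; drivers observe it through crypto_simd_usable() and take their scalar fallback. A hedged sketch of the consumer side (the two crypt helpers are hypothetical stand-ins for a driver's SIMD and scalar paths):

	#include <crypto/internal/simd.h>

	struct example_ctx;
	void example_crypt_simd(struct example_ctx *ctx);
	void example_crypt_scalar(struct example_ctx *ctx);

	static void example_crypt(struct example_ctx *ctx)
	{
		if (crypto_simd_usable())
			example_crypt_simd(ctx);
		else
			example_crypt_scalar(ctx); /* hit while tests disable SIMD */
	}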
/*
@@ -1159,15 +1237,6 @@ too_long:
algname);
return -ENAMETOOLONG;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static void crypto_disable_simd_for_test(void)
-{
-}
-
-static void crypto_reenable_simd_for_test(void)
-{
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int build_hash_sglist(struct test_sglist *tsgl,
const struct hash_testvec *vec,
@@ -1180,7 +1249,7 @@ static int build_hash_sglist(struct test_sglist *tsgl,
kv.iov_base = (void *)vec->plaintext;
kv.iov_len = vec->psize;
- iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
+ iov_iter_kvec(&input, ITER_SOURCE, &kv, 1, vec->psize);
return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
&input, divs);
}
@@ -1224,7 +1293,6 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
u8 *hashstate)
{
struct crypto_shash *tfm = desc->tfm;
- const unsigned int alignmask = crypto_shash_alignmask(tfm);
const unsigned int digestsize = crypto_shash_digestsize(tfm);
const unsigned int statesize = crypto_shash_statesize(tfm);
const char *driver = crypto_shash_driver_name(tfm);
@@ -1236,7 +1304,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize,
- cfg, alignmask);
+ cfg, 0);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -1253,7 +1321,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
}
/* Build the scatterlist for the source data */
- err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
+ err = build_hash_sglist(tsgl, vec, cfg, 0, divs);
if (err) {
pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
@@ -1408,7 +1476,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
u8 *hashstate)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- const unsigned int alignmask = crypto_ahash_alignmask(tfm);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int statesize = crypto_ahash_statesize(tfm);
const char *driver = crypto_ahash_driver_name(tfm);
@@ -1424,7 +1491,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
- cfg, alignmask);
+ cfg, 0);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -1441,7 +1508,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
}
/* Build the scatterlist for the source data */
- err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
+ err = build_hash_sglist(tsgl, vec, cfg, 0, divs);
if (err) {
pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
@@ -1610,13 +1677,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate);
@@ -1625,24 +1694,23 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
-static void generate_random_hash_testvec(struct shash_desc *desc,
+static void generate_random_hash_testvec(struct rnd_state *rng,
+ struct ahash_request *req,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
char *name, size_t max_namelen)
{
/* Data */
- vec->psize = generate_random_length(maxdatasize);
- generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+ vec->psize = generate_random_length(rng, maxdatasize);
+ generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
/*
* Key: length in range [1, maxkeysize], but usually choose maxkeysize.
@@ -1652,20 +1720,21 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
- if (prandom_u32_max(4) == 0)
- vec->ksize = 1 + prandom_u32_max(maxkeysize);
- generate_random_bytes((u8 *)vec->key, vec->ksize);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
- vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
- vec->ksize);
+ vec->setkey_error = crypto_ahash_setkey(
+ crypto_ahash_reqtfm(req), vec->key, vec->ksize);
/* If the key couldn't be set, no need to continue to digest. */
if (vec->setkey_error)
goto done;
}
/* Digest */
- vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
- vec->psize, (u8 *)vec->digest);
+ vec->digest_error = crypto_hash_digest(
+ crypto_ahash_reqtfm(req), vec->plaintext,
+ vec->psize, (u8 *)vec->digest);
done:
snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
vec->psize, vec->ksize);
@@ -1688,9 +1757,10 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
const char *driver = crypto_ahash_driver_name(tfm);
+ struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
- struct crypto_shash *generic_tfm = NULL;
- struct shash_desc *generic_desc = NULL;
+ struct ahash_request *generic_req = NULL;
+ struct crypto_ahash *generic_tfm = NULL;
unsigned int i;
struct hash_testvec vec = { 0 };
char vec_name[64];
@@ -1698,9 +1768,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
+ init_rnd_state(&rng);
+
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -1711,7 +1783,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
- generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+ generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
@@ -1730,27 +1802,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
goto out;
}
- generic_desc = kzalloc(sizeof(*desc) +
- crypto_shash_descsize(generic_tfm), GFP_KERNEL);
- if (!generic_desc) {
+ generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
err = -ENOMEM;
goto out;
}
- generic_desc->tfm = generic_tfm;
/* Check the algorithm properties for consistency. */
- if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+ if (digestsize != crypto_ahash_digestsize(generic_tfm)) {
pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
driver, digestsize,
- crypto_shash_digestsize(generic_tfm));
+ crypto_ahash_digestsize(generic_tfm));
err = -EINVAL;
goto out;
}
- if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+ if (blocksize != crypto_ahash_blocksize(generic_tfm)) {
pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
- driver, blocksize, crypto_shash_blocksize(generic_tfm));
+ driver, blocksize, crypto_ahash_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
@@ -1769,10 +1839,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_hash_testvec(generic_desc, &vec,
+ generate_random_hash_testvec(&rng, generic_req, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ generate_random_testvec_config(&rng, cfg, cfgname,
+ sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
req, desc, tsgl, hashstate);
@@ -1786,21 +1857,10 @@ out:
kfree(vec.key);
kfree(vec.plaintext);
kfree(vec.digest);
- crypto_free_shash(generic_tfm);
- kfree_sensitive(generic_desc);
+ ahash_request_free(generic_req);
+ crypto_free_ahash(generic_tfm);
return err;
}
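The comparison against the generic implementation now goes through the ahash API instead of shash, so algorithms that only expose an asynchronous implementation still get compared. A minimal sketch of hashing one buffer with a named driver the way the reworked code does (error handling abbreviated; the helper name is hypothetical):

	#include <linux/err.h>
	#include <crypto/hash.h>

	static int digest_with(const char *driver, const u8 *data,
			       unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm = crypto_alloc_ahash(driver, 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		err = crypto_hash_digest(tfm, data, len, out);
		crypto_free_ahash(tfm);
		return err;
	}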
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_hash_vs_generic_impl(const char *generic_driver,
- unsigned int maxkeysize,
- struct ahash_request *req,
- struct shash_desc *desc,
- struct test_sglist *tsgl,
- u8 *hashstate)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int alloc_shash(const char *driver, u32 type, u32 mask,
struct crypto_shash **tfm_ret,
@@ -1811,7 +1871,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask,
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
+ if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) {
/*
* This algorithm is only available through the ahash
* API, not the shash API, so skip the shash tests.
@@ -1857,6 +1917,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
atfm = crypto_alloc_ahash(driver, type, mask);
if (IS_ERR(atfm)) {
+ if (PTR_ERR(atfm) == -ENOENT)
+ return 0;
pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(atfm));
return PTR_ERR(atfm);
@@ -2172,13 +2234,15 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_aead_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2187,13 +2251,11 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
-struct aead_extra_tests_ctx {
+struct aead_slow_tests_ctx {
+ struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
const struct alg_test_desc *test_desc;
@@ -2212,24 +2274,26 @@ struct aead_extra_tests_ctx {
* here means the full ciphertext including the authentication tag. The
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
*/
-static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+static void mutate_aead_message(struct rnd_state *rng,
+ struct aead_testvec *vec, bool aad_iv,
unsigned int ivsize)
{
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
- if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
+ if (prandom_bool(rng) && vec->alen > aad_tail_size) {
/* Mutate the AAD */
- flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
- if (prandom_u32_max(2) == 0)
+ flip_random_bit(rng, (u8 *)vec->assoc,
+ vec->alen - aad_tail_size);
+ if (prandom_bool(rng))
return;
}
- if (prandom_u32_max(2) == 0) {
+ if (prandom_bool(rng)) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
- flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
+ flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
} else {
/* Mutate any part of the ciphertext */
- flip_random_bit((u8 *)vec->ctext, vec->clen);
+ flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
}
}
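The mutation is deliberately minimal: a single flipped bit in the AAD, the tag, or the ciphertext must be enough to make verification fail, so an implementation that survives this fuzzing really is authenticating every bit it claims to. Threading the rng state through explicitly, here and throughout the rest of the file, also keeps the fuzzing independent of the global RNG.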
@@ -2240,7 +2304,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
*/
#define MIN_COLLISION_FREE_AUTHSIZE 8
-static void generate_aead_message(struct aead_request *req,
+static void generate_aead_message(struct rnd_state *rng,
+ struct aead_request *req,
const struct aead_test_suite *suite,
struct aead_testvec *vec,
bool prefer_inauthentic)
@@ -2249,17 +2314,18 @@ static void generate_aead_message(struct aead_request *req,
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
- (prefer_inauthentic || prandom_u32_max(4) == 0);
+ (prefer_inauthentic ||
+ prandom_u32_below(rng, 4) == 0);
/* Generate the AAD. */
- generate_random_bytes((u8 *)vec->assoc, vec->alen);
+ generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
if (suite->aad_iv && vec->alen >= ivsize)
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
- if (inauthentic && prandom_u32_max(2) == 0) {
+ if (inauthentic && prandom_bool(rng)) {
/* Generate a random ciphertext. */
- generate_random_bytes((u8 *)vec->ctext, vec->clen);
+ generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
} else {
int i = 0;
struct scatterlist src[2], dst;
@@ -2271,7 +2337,7 @@ static void generate_aead_message(struct aead_request *req,
if (vec->alen)
sg_set_buf(&src[i++], vec->assoc, vec->alen);
if (vec->plen) {
- generate_random_bytes((u8 *)vec->ptext, vec->plen);
+ generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
sg_set_buf(&src[i++], vec->ptext, vec->plen);
}
sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
@@ -2291,7 +2357,7 @@ static void generate_aead_message(struct aead_request *req,
* Mutate the authentic (ciphertext, AAD) pair to get an
* inauthentic one.
*/
- mutate_aead_message(vec, suite->aad_iv, ivsize);
+ mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
}
vec->novrfy = 1;
if (suite->einval_allowed)
@@ -2305,7 +2371,8 @@ static void generate_aead_message(struct aead_request *req,
* If 'prefer_inauthentic' is true, then this function will generate inauthentic
* test vectors (i.e. vectors with 'vec->novrfy=1') more often.
*/
-static void generate_random_aead_testvec(struct aead_request *req,
+static void generate_random_aead_testvec(struct rnd_state *rng,
+ struct aead_request *req,
struct aead_testvec *vec,
const struct aead_test_suite *suite,
unsigned int maxkeysize,
@@ -2321,18 +2388,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (prandom_u32_max(4) == 0)
- vec->klen = prandom_u32_max(maxkeysize + 1);
- generate_random_bytes((u8 *)vec->key, vec->klen);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
/* IV */
- generate_random_bytes((u8 *)vec->iv, ivsize);
+ generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
- if (prandom_u32_max(4) == 0)
- authsize = prandom_u32_max(maxauthsize + 1);
+ if (prandom_u32_below(rng, 4) == 0)
+ authsize = prandom_u32_below(rng, maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
@@ -2341,11 +2408,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
/* AAD, plaintext, and ciphertext lengths */
- total_len = generate_random_length(maxdatasize);
- if (prandom_u32_max(4) == 0)
+ total_len = generate_random_length(rng, maxdatasize);
+ if (prandom_u32_below(rng, 4) == 0)
vec->alen = 0;
else
- vec->alen = generate_random_length(total_len);
+ vec->alen = generate_random_length(rng, total_len);
vec->plen = total_len - vec->alen;
vec->clen = vec->plen + authsize;
@@ -2356,19 +2423,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->novrfy = 0;
vec->crypt_error = 0;
if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
- generate_aead_message(req, suite, vec, prefer_inauthentic);
+ generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
snprintf(name, max_namelen,
"\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
}
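generate_random_length() is also defined outside this diff; judging from its call sites it biases toward short and power-of-two lengths so that boundary cases are hit far more often than a uniform draw would manage. A simplified, approximate sketch (the exact bucket sizes are an assumption, not taken from the patch):

	#include <linux/log2.h>

	/* Approximate shape, reconstructed from call sites only. */
	static unsigned int generate_random_length(struct rnd_state *rng,
						   unsigned int max_len)
	{
		unsigned int len = prandom_u32_below(rng, max_len + 1);

		switch (prandom_u32_below(rng, 4)) {
		case 0:
			len %= 64;	/* mostly short messages */
			break;
		case 1:
			len %= 256;
			break;
		case 2:
			len %= 1024;
			break;
		default:
			break;		/* keep the full-range draw */
		}
		if (len && prandom_u32_below(rng, 4) == 0)
			len = rounddown_pow_of_two(len);
		return len;
	}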
-static void try_to_generate_inauthentic_testvec(
- struct aead_extra_tests_ctx *ctx)
+static void try_to_generate_inauthentic_testvec(struct aead_slow_tests_ctx *ctx)
{
int i;
for (i = 0; i < 10; i++) {
- generate_random_aead_testvec(ctx->req, &ctx->vec,
+ generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
@@ -2382,7 +2448,7 @@ static void try_to_generate_inauthentic_testvec(
* Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the
* result of an encryption with the key) and verify that decryption fails.
*/
-static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
+static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx)
{
unsigned int i;
int err;
@@ -2399,7 +2465,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
*/
try_to_generate_inauthentic_testvec(ctx);
if (ctx->vec.novrfy) {
- generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+ ctx->cfgname,
sizeof(ctx->cfgname));
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
@@ -2416,7 +2483,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
* Test the AEAD algorithm against the corresponding generic implementation, if
* one is available.
*/
-static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx)
{
struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
@@ -2489,12 +2556,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
* the other implementation against them.
*/
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_aead_testvec(generic_req, &ctx->vec,
+ generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
sizeof(ctx->vec_name), false);
- generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+ ctx->cfgname,
sizeof(ctx->cfgname));
if (!ctx->vec.novrfy) {
err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
@@ -2519,20 +2587,21 @@ out:
return err;
}
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
+static int test_aead_slow(const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
{
- struct aead_extra_tests_ctx *ctx;
+ struct aead_slow_tests_ctx *ctx;
unsigned int i;
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ init_rnd_state(&ctx->rng);
ctx->req = req;
ctx->tfm = crypto_aead_reqtfm(req);
ctx->test_desc = test_desc;
@@ -2568,14 +2637,6 @@ out:
kfree(ctx);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
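With the compile-time stub gone, skipping is a purely runtime decision: noslowtests replaces the old noextratests module parameter, presumably declared near the top of the file alongside the fuzz iteration count (neither declaration is visible in this diff). A sketch, with the default value of fuzz_iterations an assumption:

	/* Assumed module parameters (declared outside this diff). */
	static bool noslowtests;
	module_param(noslowtests, bool, 0644);
	MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests");

	static unsigned int fuzz_iterations = 100;
	module_param(fuzz_iterations, uint, 0644);
	MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");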
static int test_aead(int enc, const struct aead_test_suite *suite,
struct aead_request *req,
@@ -2609,6 +2670,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
tfm = crypto_alloc_aead(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return 0;
pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -2639,7 +2702,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
if (err)
goto out;
- err = test_aead_extra(desc, req, tsgls);
+ err = test_aead_slow(desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -2780,18 +2843,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
if (ivsize) {
if (WARN_ON(ivsize > MAX_IVLEN))
return -EINVAL;
- if (vec->generates_iv && !enc)
- memcpy(iv, vec->iv_out, ivsize);
- else if (vec->iv)
+ if (vec->iv)
memcpy(iv, vec->iv, ivsize);
else
memset(iv, 0, ivsize);
} else {
- if (vec->generates_iv) {
- pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
- driver, vec_name);
- return -EINVAL;
- }
iv = NULL;
}
@@ -2920,13 +2976,15 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2935,16 +2993,15 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
-static void generate_random_cipher_testvec(struct skcipher_request *req,
+static void generate_random_cipher_testvec(struct rnd_state *rng,
+ struct skcipher_request *req,
struct cipher_testvec *vec,
unsigned int maxdatasize,
char *name, size_t max_namelen)
@@ -2958,17 +3015,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (prandom_u32_max(4) == 0)
- vec->klen = prandom_u32_max(maxkeysize + 1);
- generate_random_bytes((u8 *)vec->key, vec->klen);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
/* IV */
- generate_random_bytes((u8 *)vec->iv, ivsize);
+ generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Plaintext */
- vec->len = generate_random_length(maxdatasize);
- generate_random_bytes((u8 *)vec->ptext, vec->len);
+ vec->len = generate_random_length(rng, maxdatasize);
+ generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
/* If the key couldn't be set, no need to continue to encrypt. */
if (vec->setkey_error)
@@ -3010,6 +3067,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
const char *driver = crypto_skcipher_driver_name(tfm);
+ struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_skcipher *generic_tfm = NULL;
struct skcipher_request *generic_req = NULL;
@@ -3020,12 +3078,10 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
- /* Keywrap isn't supported here yet as it handles its IV differently. */
- if (strncmp(algname, "kw(", 3) == 0)
- return 0;
+ init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
@@ -3111,9 +3167,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+ generate_random_cipher_testvec(&rng, generic_req, &vec,
+ maxdatasize,
vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ generate_random_testvec_config(&rng, cfg, cfgname,
+ sizeof(cfgname));
err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
cfg, req, tsgls);
@@ -3136,14 +3194,6 @@ out:
skcipher_request_free(generic_req);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_skcipher_vs_generic_impl(const char *generic_driver,
- struct skcipher_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher(int enc, const struct cipher_test_suite *suite,
struct skcipher_request *req,
@@ -3177,6 +3227,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_skcipher(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return 0;
pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -3215,112 +3267,6 @@ out:
return err;
}
-static int test_comp(struct crypto_comp *tfm,
- const struct comp_testvec *ctemplate,
- const struct comp_testvec *dtemplate,
- int ctcount, int dtcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
- char *output, *decomp_output;
- unsigned int i;
- int ret;
-
- output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output)
- return -ENOMEM;
-
- decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_output) {
- kfree(output);
- return -ENOMEM;
- }
-
- for (i = 0; i < ctcount; i++) {
- int ilen;
- unsigned int dlen = COMP_BUF_SIZE;
-
- memset(output, 0, COMP_BUF_SIZE);
- memset(decomp_output, 0, COMP_BUF_SIZE);
-
- ilen = ctemplate[i].inlen;
- ret = crypto_comp_compress(tfm, ctemplate[i].input,
- ilen, output, &dlen);
- if (ret) {
- printk(KERN_ERR "alg: comp: compression failed "
- "on test %d for %s: ret=%d\n", i + 1, algo,
- -ret);
- goto out;
- }
-
- ilen = dlen;
- dlen = COMP_BUF_SIZE;
- ret = crypto_comp_decompress(tfm, output,
- ilen, decomp_output, &dlen);
- if (ret) {
- pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n",
- i + 1, algo, -ret);
- goto out;
- }
-
- if (dlen != ctemplate[i].inlen) {
- printk(KERN_ERR "alg: comp: Compression test %d "
- "failed for %s: output len = %d\n", i + 1, algo,
- dlen);
- ret = -EINVAL;
- goto out;
- }
-
- if (memcmp(decomp_output, ctemplate[i].input,
- ctemplate[i].inlen)) {
- pr_err("alg: comp: compression failed: output differs: on test %d for %s\n",
- i + 1, algo);
- hexdump(decomp_output, dlen);
- ret = -EINVAL;
- goto out;
- }
- }
-
- for (i = 0; i < dtcount; i++) {
- int ilen;
- unsigned int dlen = COMP_BUF_SIZE;
-
- memset(decomp_output, 0, COMP_BUF_SIZE);
-
- ilen = dtemplate[i].inlen;
- ret = crypto_comp_decompress(tfm, dtemplate[i].input,
- ilen, decomp_output, &dlen);
- if (ret) {
- printk(KERN_ERR "alg: comp: decompression failed "
- "on test %d for %s: ret=%d\n", i + 1, algo,
- -ret);
- goto out;
- }
-
- if (dlen != dtemplate[i].outlen) {
- printk(KERN_ERR "alg: comp: Decompression test %d "
- "failed for %s: output len = %d\n", i + 1, algo,
- dlen);
- ret = -EINVAL;
- goto out;
- }
-
- if (memcmp(decomp_output, dtemplate[i].output, dlen)) {
- printk(KERN_ERR "alg: comp: Decompression test %d "
- "failed for %s\n", i + 1, algo);
- hexdump(decomp_output, dlen);
- ret = -EINVAL;
- goto out;
- }
- }
-
- ret = 0;
-
-out:
- kfree(decomp_output);
- kfree(output);
- return ret;
-}
-
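Everything the deleted test_comp() covered is handled by the acomp path below: legacy scomp drivers are wrapped by the acomp API transparently, so a single test routine serves both. For reference, a minimal sketch of the request flow that test_acomp() is built on (tfm and the data buffers assumed set up, error handling elided):

	struct acomp_req *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	req = acomp_request_alloc(tfm);
	sg_init_one(&src, input, ilen);		/* input buffer */
	sg_init_one(&dst, output, dlen);	/* dlen = output buffer size */
	acomp_request_set_params(req, &src, &dst, ilen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
	/* On success, req->dlen holds the produced output length. */
	acomp_request_free(req);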
static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
@@ -3417,21 +3363,6 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- crypto_init_wait(&wait);
- sg_init_one(&src, input_vec, ilen);
- acomp_request_set_params(req, &src, NULL, ilen, 0);
-
- ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
- if (ret) {
- pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
- i + 1, algo, -ret);
- kfree(input_vec);
- acomp_request_free(req);
- goto out;
- }
-#endif
-
kfree(input_vec);
acomp_request_free(req);
}
@@ -3493,20 +3424,6 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- crypto_init_wait(&wait);
- acomp_request_set_params(req, &src, NULL, ilen, 0);
-
- ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
- if (ret) {
- pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
- i + 1, algo, -ret);
- kfree(input_vec);
- acomp_request_free(req);
- goto out;
- }
-#endif
-
kfree(input_vec);
acomp_request_free(req);
}
@@ -3519,68 +3436,6 @@ out:
return ret;
}
-static int test_cprng(struct crypto_rng *tfm,
- const struct cprng_testvec *template,
- unsigned int tcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
- int err = 0, i, j, seedsize;
- u8 *seed;
- char result[32];
-
- seedsize = crypto_rng_seedsize(tfm);
-
- seed = kmalloc(seedsize, GFP_KERNEL);
- if (!seed) {
- printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
- "for %s\n", algo);
- return -ENOMEM;
- }
-
- for (i = 0; i < tcount; i++) {
- memset(result, 0, 32);
-
- memcpy(seed, template[i].v, template[i].vlen);
- memcpy(seed + template[i].vlen, template[i].key,
- template[i].klen);
- memcpy(seed + template[i].vlen + template[i].klen,
- template[i].dt, template[i].dtlen);
-
- err = crypto_rng_reset(tfm, seed, seedsize);
- if (err) {
- printk(KERN_ERR "alg: cprng: Failed to reset rng "
- "for %s\n", algo);
- goto out;
- }
-
- for (j = 0; j < template[i].loops; j++) {
- err = crypto_rng_get_bytes(tfm, result,
- template[i].rlen);
- if (err < 0) {
- printk(KERN_ERR "alg: cprng: Failed to obtain "
- "the correct amount of random data for "
- "%s (requested %d)\n", algo,
- template[i].rlen);
- goto out;
- }
- }
-
- err = memcmp(result, template[i].result,
- template[i].rlen);
- if (err) {
- printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
- i, algo);
- hexdump(result, template[i].rlen);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- kfree(seed);
- return err;
-}
-
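test_cprng() only served the ansi_cprng vectors, whose table entry is dropped further down; the DRBG suites keep exercising the same crypto_rng front end. A minimal usage sketch of that surviving API (the algorithm name is illustrative, and seed/seedlen are the caller's):

	struct crypto_rng *rng;
	u8 buf[16];
	int err;

	rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);
	err = crypto_rng_reset(rng, seed, seedlen);	/* (re)seed */
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, sizeof(buf));
	crypto_free_rng(rng);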
static int alg_test_cipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
@@ -3590,6 +3445,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_cipher(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return 0;
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -3606,115 +3463,25 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
- struct crypto_comp *comp;
struct crypto_acomp *acomp;
int err;
- u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
-
- if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
- acomp = crypto_alloc_acomp(driver, type, mask);
- if (IS_ERR(acomp)) {
- pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(acomp));
- return PTR_ERR(acomp);
- }
- err = test_acomp(acomp, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
- crypto_free_acomp(acomp);
- } else {
- comp = crypto_alloc_comp(driver, type, mask);
- if (IS_ERR(comp)) {
- pr_err("alg: comp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(comp));
- return PTR_ERR(comp);
- }
-
- err = test_comp(comp, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
-
- crypto_free_comp(comp);
- }
- return err;
-}
-
-static int alg_test_crc32c(const struct alg_test_desc *desc,
- const char *driver, u32 type, u32 mask)
-{
- struct crypto_shash *tfm;
- __le32 val;
- int err;
-
- err = alg_test_hash(desc, driver, type, mask);
- if (err)
- return err;
- tfm = crypto_alloc_shash(driver, type, mask);
- if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
- /*
- * This crc32c implementation is only available through
- * ahash API, not the shash API, so the remaining part
- * of the test is not applicable to it.
- */
+ acomp = crypto_alloc_acomp(driver, type, mask);
+ if (IS_ERR(acomp)) {
+ if (PTR_ERR(acomp) == -ENOENT)
return 0;
- }
- printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- driver = crypto_shash_driver_name(tfm);
-
- do {
- SHASH_DESC_ON_STACK(shash, tfm);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
-
- shash->tfm = tfm;
-
- *ctx = 420553207;
- err = crypto_shash_final(shash, (u8 *)&val);
- if (err) {
- printk(KERN_ERR "alg: crc32c: Operation failed for "
- "%s: %d\n", driver, err);
- break;
- }
-
- if (val != cpu_to_le32(~420553207)) {
- pr_err("alg: crc32c: Test failed for %s: %u\n",
- driver, le32_to_cpu(val));
- err = -EINVAL;
- }
- } while (0);
-
- crypto_free_shash(tfm);
-
+ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(acomp));
+ return PTR_ERR(acomp);
+ }
+ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
+ crypto_free_acomp(acomp);
return err;
}
-static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
- u32 type, u32 mask)
-{
- struct crypto_rng *rng;
- int err;
-
- rng = crypto_alloc_rng(driver, type, mask);
- if (IS_ERR(rng)) {
- printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(rng));
- return PTR_ERR(rng);
- }
-
- err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count);
-
- crypto_free_rng(rng);
-
- return err;
-}
-
-
static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
const char *driver, u32 type, u32 mask)
{
@@ -3729,10 +3496,12 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
drng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(drng)) {
+ kfree_sensitive(buf);
+ if (PTR_ERR(drng) == -ENOENT)
+ return 0;
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
- kfree_sensitive(buf);
- return -ENOMEM;
+ return PTR_ERR(drng);
}
test_data.testentropy = &testentropy;
@@ -3974,6 +3743,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
tfm = crypto_alloc_kpp(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return 0;
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -4002,11 +3773,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
- struct scatterlist src, dst, src_tab[3];
- const char *m, *c;
- unsigned int m_size, c_size;
- const char *op;
- u8 *key, *ptr;
+ struct scatterlist src, dst, src_tab[2];
+ const char *c;
+ unsigned int c_size;
if (testmgr_alloc_buf(xbuf))
return err;
@@ -4017,92 +3786,53 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
crypto_init_wait(&wait);
- key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
- GFP_KERNEL);
- if (!key)
- goto free_req;
- memcpy(key, vecs->key, vecs->key_len);
- ptr = key + vecs->key_len;
- ptr = test_pack_u32(ptr, vecs->algo);
- ptr = test_pack_u32(ptr, vecs->param_len);
- memcpy(ptr, vecs->params, vecs->param_len);
-
if (vecs->public_key_vec)
- err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
+ err = crypto_akcipher_set_pub_key(tfm, vecs->key,
+ vecs->key_len);
else
- err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
+ err = crypto_akcipher_set_priv_key(tfm, vecs->key,
+ vecs->key_len);
if (err)
- goto free_key;
+ goto free_req;
- /*
- * First run test which do not require a private key, such as
- * encrypt or verify.
- */
+ /* First run encrypt test which does not require a private key */
err = -ENOMEM;
out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
- goto free_key;
-
- if (!vecs->siggen_sigver_test) {
- m = vecs->m;
- m_size = vecs->m_size;
- c = vecs->c;
- c_size = vecs->c_size;
- op = "encrypt";
- } else {
- /* Swap args so we could keep plaintext (digest)
- * in vecs->m, and cooked signature in vecs->c.
- */
- m = vecs->c; /* signature */
- m_size = vecs->c_size;
- c = vecs->m; /* digest */
- c_size = vecs->m_size;
- op = "verify";
- }
+ goto free_req;
+
+ c = vecs->c;
+ c_size = vecs->c_size;
err = -E2BIG;
- if (WARN_ON(m_size > PAGE_SIZE))
+ if (WARN_ON(vecs->m_size > PAGE_SIZE))
goto free_all;
- memcpy(xbuf[0], m, m_size);
+ memcpy(xbuf[0], vecs->m, vecs->m_size);
- sg_init_table(src_tab, 3);
+ sg_init_table(src_tab, 2);
sg_set_buf(&src_tab[0], xbuf[0], 8);
- sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
- if (vecs->siggen_sigver_test) {
- if (WARN_ON(c_size > PAGE_SIZE))
- goto free_all;
- memcpy(xbuf[1], c, c_size);
- sg_set_buf(&src_tab[2], xbuf[1], c_size);
- akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
- } else {
- sg_init_one(&dst, outbuf_enc, out_len_max);
- akcipher_request_set_crypt(req, src_tab, &dst, m_size,
- out_len_max);
- }
+ sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
+ sg_init_one(&dst, outbuf_enc, out_len_max);
+ akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
+ out_len_max);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
- err = crypto_wait_req(vecs->siggen_sigver_test ?
- /* Run asymmetric signature verification */
- crypto_akcipher_verify(req) :
- /* Run asymmetric encrypt */
- crypto_akcipher_encrypt(req), &wait);
+ err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
if (err) {
- pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
+ pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
}
- if (!vecs->siggen_sigver_test && c) {
+ if (c) {
if (req->dst_len != c_size) {
- pr_err("alg: akcipher: %s test failed. Invalid output len\n",
- op);
+ pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
err = -EINVAL;
goto free_all;
}
/* verify that encrypted message is equal to expected */
if (memcmp(c, outbuf_enc, c_size) != 0) {
- pr_err("alg: akcipher: %s test failed. Invalid output\n",
- op);
+ pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
hexdump(outbuf_enc, c_size);
err = -EINVAL;
goto free_all;
@@ -4110,7 +3840,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
}
/*
- * Don't invoke (decrypt or sign) test which require a private key
+ * Don't invoke decrypt test which requires a private key
* for vectors with only a public key.
*/
if (vecs->public_key_vec) {
@@ -4123,13 +3853,12 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
goto free_all;
}
- if (!vecs->siggen_sigver_test && !c) {
+ if (!c) {
c = outbuf_enc;
c_size = req->dst_len;
}
err = -E2BIG;
- op = vecs->siggen_sigver_test ? "sign" : "decrypt";
if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], c, c_size);
@@ -4139,34 +3868,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max);
- err = crypto_wait_req(vecs->siggen_sigver_test ?
- /* Run asymmetric signature generation */
- crypto_akcipher_sign(req) :
- /* Run asymmetric decrypt */
- crypto_akcipher_decrypt(req), &wait);
+ err = crypto_wait_req(crypto_akcipher_decrypt(req), &wait);
if (err) {
- pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
+ pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
}
out_len = req->dst_len;
- if (out_len < m_size) {
- pr_err("alg: akcipher: %s test failed. Invalid output len %u\n",
- op, out_len);
+ if (out_len < vecs->m_size) {
+ pr_err("alg: akcipher: decrypt test failed. Invalid output len %u\n",
+ out_len);
err = -EINVAL;
goto free_all;
}
/* verify that decrypted message is equal to the original msg */
- if (memchr_inv(outbuf_dec, 0, out_len - m_size) ||
- memcmp(m, outbuf_dec + out_len - m_size, m_size)) {
- pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
+ if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
+ memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
+ vecs->m_size)) {
+ pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
hexdump(outbuf_dec, out_len);
err = -EINVAL;
}
free_all:
kfree(outbuf_dec);
kfree(outbuf_enc);
-free_key:
- kfree(key);
free_req:
akcipher_request_free(req);
free_xbuf:
@@ -4202,6 +3926,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_akcipher(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return 0;
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -4214,6 +3940,114 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
return err;
}
+static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs)
+{
+ u8 *ptr, *key __free(kfree);
+ int err, sig_size;
+
+ key = kmalloc(vecs->key_len + 2 * sizeof(u32) + vecs->param_len,
+ GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ /* ecrdsa expects additional parameters appended to the key */
+ memcpy(key, vecs->key, vecs->key_len);
+ ptr = key + vecs->key_len;
+ ptr = test_pack_u32(ptr, vecs->algo);
+ ptr = test_pack_u32(ptr, vecs->param_len);
+ memcpy(ptr, vecs->params, vecs->param_len);
+
+ if (vecs->public_key_vec)
+ err = crypto_sig_set_pubkey(tfm, key, vecs->key_len);
+ else
+ err = crypto_sig_set_privkey(tfm, key, vecs->key_len);
+ if (err)
+ return err;
+
+ /*
+ * Run asymmetric signature verification first
+ * (which does not require a private key)
+ */
+ err = crypto_sig_verify(tfm, vecs->c, vecs->c_size,
+ vecs->m, vecs->m_size);
+ if (err) {
+ pr_err("alg: sig: verify test failed: err %d\n", err);
+ return err;
+ }
+
+ /*
+ * Don't invoke sign test (which requires a private key)
+ * for vectors with only a public key.
+ */
+ if (vecs->public_key_vec)
+ return 0;
+
+ sig_size = crypto_sig_maxsize(tfm);
+ if (sig_size < vecs->c_size) {
+ pr_err("alg: sig: invalid maxsize %u\n", sig_size);
+ return -EINVAL;
+ }
+
+ u8 *sig __free(kfree) = kzalloc(sig_size, GFP_KERNEL);
+ if (!sig)
+ return -ENOMEM;
+
+ /* Run asymmetric signature generation */
+ err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size);
+ if (err < 0) {
+ pr_err("alg: sig: sign test failed: err %d\n", err);
+ return err;
+ }
+
+ /* Verify that generated signature equals cooked signature */
+ if (err != vecs->c_size ||
+ memcmp(sig, vecs->c, vecs->c_size) ||
+ memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) {
+ pr_err("alg: sig: sign test failed: invalid output\n");
+ hexdump(sig, sig_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
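Two details above are defined elsewhere in the file: test_pack_u32(), which appends a native-endian u32 to the key blob (the ecrdsa key format carries the algorithm OID and parameter length inline, as the deleted akcipher code did), and the __free(kfree) annotations from <linux/cleanup.h>, which free key and sig automatically on every return path. A sketch of the helper, assuming it is unchanged from the akcipher version it replaces:

	/* Assumed helper (defined earlier in testmgr.c). */
	static u8 *test_pack_u32(u8 *dst, u32 val)
	{
		memcpy(dst, &val, sizeof(val));
		return dst + sizeof(val);
	}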
+
+static int test_sig(struct crypto_sig *tfm, const char *alg,
+ const struct sig_testvec *vecs, unsigned int tcount)
+{
+ const char *algo = crypto_tfm_alg_driver_name(crypto_sig_tfm(tfm));
+ int ret, i;
+
+ for (i = 0; i < tcount; i++) {
+ ret = test_sig_one(tfm, vecs++);
+ if (ret) {
+ pr_err("alg: sig: test %d failed for %s: err %d\n",
+ i + 1, algo, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int alg_test_sig(const struct alg_test_desc *desc, const char *driver,
+ u32 type, u32 mask)
+{
+ struct crypto_sig *tfm;
+ int err = 0;
+
+ tfm = crypto_alloc_sig(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: sig: Failed to load tfm for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ if (desc->suite.sig.vecs)
+ err = test_sig(tfm, desc->alg, desc->suite.sig.vecs,
+ desc->suite.sig.count);
+
+ crypto_free_sig(tfm);
+ return err;
+}
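alg_test_sig() is the entry point for the new struct crypto_sig API, which takes over the sign/verify halves that test_akcipher_one() just lost. A minimal caller-side sketch (the algorithm name is illustrative; key, sig, and digest are the caller's buffers):

	struct crypto_sig *tfm;
	int err;

	tfm = crypto_alloc_sig("x962(ecdsa-nist-p256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_sig_set_pubkey(tfm, key, key_len);
	if (!err)
		err = crypto_sig_verify(tfm, sig, sig_len,
					digest, digest_len);
	crypto_free_sig(tfm);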
+
static int alg_test_null(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
@@ -4227,14 +4061,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "adiantum(xchacha12,aes)",
- .generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
+ .generic_driver = "adiantum(xchacha12-lib,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
},
}, {
.alg = "adiantum(xchacha20,aes)",
- .generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
+ .generic_driver = "adiantum(xchacha20-lib,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@@ -4246,19 +4080,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = __VECS(aegis128_tv_template)
}
}, {
- .alg = "ansi_cprng",
- .test = alg_test_cprng,
- .suite = {
- .cprng = __VECS(ansi_cprng_aes_tv_template)
- }
- }, {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
+ .generic_driver = "authenc(hmac-md5-lib,ecb-cipher_null)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
}
}, {
.alg = "authenc(hmac(sha1),cbc(aes))",
+ .generic_driver = "authenc(hmac-sha1-lib,cbc(aes-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4266,12 +4096,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha1),cbc(des))",
+ .generic_driver = "authenc(hmac-sha1-lib,cbc(des-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha1_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),cbc(des3_ede))",
+ .generic_driver = "authenc(hmac-sha1-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha1_des3_ede_cbc_tv_temp)
@@ -4282,6 +4114,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha1),ecb(cipher_null))",
+ .generic_driver = "authenc(hmac-sha1-lib,ecb-cipher_null)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp)
@@ -4292,18 +4125,21 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha224),cbc(des))",
+ .generic_driver = "authenc(hmac-sha224-lib,cbc(des-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha224_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha224),cbc(des3_ede))",
+ .generic_driver = "authenc(hmac-sha224-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
+ .generic_driver = "authenc(hmac-sha256-lib,cbc(aes-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -4311,12 +4147,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha256),cbc(des))",
+ .generic_driver = "authenc(hmac-sha256-lib,cbc(des-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha256_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(des3_ede))",
+ .generic_driver = "authenc(hmac-sha256-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp)
@@ -4326,17 +4164,26 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "authenc(hmac(sha256),cts(cbc(aes)))",
+ .generic_driver = "authenc(hmac-sha256-lib,cts(cbc(aes-generic)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128)
+ }
+ }, {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha384),cbc(des))",
+ .generic_driver = "authenc(hmac-sha384-lib,cbc(des-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha384_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),cbc(des3_ede))",
+ .generic_driver = "authenc(hmac-sha384-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp)
@@ -4346,11 +4193,19 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "authenc(hmac(sha384),cts(cbc(aes)))",
+ .generic_driver = "authenc(hmac-sha384-lib,cts(cbc(aes-generic)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192)
+ }
+ }, {
.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),cbc(aes))",
+ .generic_driver = "authenc(hmac-sha512-lib,cbc(aes-generic))",
.fips_allowed = 1,
.test = alg_test_aead,
.suite = {
@@ -4358,12 +4213,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha512),cbc(des))",
+ .generic_driver = "authenc(hmac-sha512-lib,cbc(des-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha512_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),cbc(des3_ede))",
+ .generic_driver = "authenc(hmac-sha512-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp)
@@ -4378,6 +4235,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "blake2b-160",
+ .generic_driver = "blake2b-160-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4385,6 +4243,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "blake2b-256",
+ .generic_driver = "blake2b-256-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4392,6 +4251,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "blake2b-384",
+ .generic_driver = "blake2b-384-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4399,6 +4259,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "blake2b-512",
+ .generic_driver = "blake2b-512-lib",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
@@ -4501,7 +4362,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif
.alg = "cbcmac(aes)",
- .fips_allowed = 1,
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_cbcmac_tv_template)
@@ -4534,26 +4394,8 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
- .alg = "cfb(aes)",
- .test = alg_test_skcipher,
- .fips_allowed = 1,
- .suite = {
- .cipher = __VECS(aes_cfb_tv_template)
- },
- }, {
- .alg = "cfb(aria)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(aria_cfb_tv_template)
- },
- }, {
- .alg = "cfb(sm4)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(sm4_cfb_tv_template)
- }
- }, {
.alg = "chacha20",
+ .generic_driver = "chacha20-lib",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(chacha20_tv_template)
@@ -4566,6 +4408,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(aes_cmac128_tv_template)
}
}, {
+ .alg = "cmac(camellia)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(camellia_cmac128_tv_template)
+ }
+ }, {
.alg = "cmac(des3_ede)",
.test = alg_test_hash,
.suite = {
@@ -4578,10 +4426,8 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sm4_cmac128_tv_template)
}
}, {
- .alg = "compress_null",
- .test = alg_test_null,
- }, {
.alg = "crc32",
+ .generic_driver = "crc32-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -4589,24 +4435,11 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "crc32c",
- .test = alg_test_crc32c,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crc32c_tv_template)
- }
- }, {
- .alg = "crc64-rocksoft",
+ .generic_driver = "crc32c-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
- .hash = __VECS(crc64_rocksoft_tv_template)
- }
- }, {
- .alg = "crct10dif",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crct10dif_tv_template)
+ .hash = __VECS(crc32c_tv_template)
}
}, {
.alg = "ctr(aes)",
@@ -4713,10 +4546,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "curve25519",
- .test = alg_test_kpp,
+ .alg = "cts(cbc(sm4))",
+ .test = alg_test_skcipher,
.suite = {
- .kpp = __VECS(curve25519_tv_template)
+ .cipher = __VECS(sm4_cts_tv_template)
}
}, {
.alg = "deflate",
@@ -4729,6 +4562,16 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "deflate-iaa",
+ .test = alg_test_comp,
+ .fips_allowed = 1,
+ .suite = {
+ .comp = {
+ .comp = __VECS(deflate_comp_tv_template),
+ .decomp = __VECS(deflate_decomp_tv_template)
+ }
+ }
+ }, {
.alg = "dh",
.test = alg_test_kpp,
.suite = {
@@ -4759,14 +4602,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
}
}, {
- /*
- * There is no need to specifically test the DRBG with every
- * backend cipher -- covered by drbg_nopr_hmac_sha256 test
- */
- .alg = "drbg_nopr_hmac_sha1",
- .fips_allowed = 1,
- .test = alg_test_null,
- }, {
.alg = "drbg_nopr_hmac_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
@@ -4774,10 +4609,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
}
}, {
- /* covered by drbg_nopr_hmac_sha256 test */
+ /*
+ * There is no need to specifically test the DRBG with every
+ * backend cipher -- covered by drbg_nopr_hmac_sha512 test
+ */
.alg = "drbg_nopr_hmac_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_nopr_hmac_sha512",
.test = alg_test_drbg,
@@ -4786,10 +4624,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.drbg = __VECS(drbg_nopr_hmac_sha512_tv_template)
}
}, {
- .alg = "drbg_nopr_sha1",
- .fips_allowed = 1,
- .test = alg_test_null,
- }, {
.alg = "drbg_nopr_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
@@ -4799,8 +4633,8 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_nopr_sha256 test */
.alg = "drbg_nopr_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_nopr_sha512",
.fips_allowed = 1,
@@ -4822,10 +4656,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
- .alg = "drbg_pr_hmac_sha1",
- .fips_allowed = 1,
- .test = alg_test_null,
- }, {
.alg = "drbg_pr_hmac_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
@@ -4835,17 +4665,13 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_pr_hmac_sha256 test */
.alg = "drbg_pr_hmac_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_pr_hmac_sha512",
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "drbg_pr_sha1",
- .fips_allowed = 1,
- .test = alg_test_null,
- }, {
.alg = "drbg_pr_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
@@ -4855,8 +4681,8 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_pr_sha256 test */
.alg = "drbg_pr_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
+ .fips_allowed = 1
}, {
.alg = "drbg_pr_sha512",
.fips_allowed = 1,
@@ -4876,7 +4702,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ecb(arc4)",
- .generic_driver = "ecb(arc4)-generic",
+ .generic_driver = "arc4-generic",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(arc4_tv_template)
@@ -5022,30 +4848,40 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ecdsa-nist-p192",
- .test = alg_test_akcipher,
+ .test = alg_test_sig,
.suite = {
- .akcipher = __VECS(ecdsa_nist_p192_tv_template)
+ .sig = __VECS(ecdsa_nist_p192_tv_template)
}
}, {
.alg = "ecdsa-nist-p256",
- .test = alg_test_akcipher,
+ .test = alg_test_sig,
+ .fips_allowed = 1,
.suite = {
- .akcipher = __VECS(ecdsa_nist_p256_tv_template)
+ .sig = __VECS(ecdsa_nist_p256_tv_template)
}
}, {
.alg = "ecdsa-nist-p384",
- .test = alg_test_akcipher,
+ .test = alg_test_sig,
+ .fips_allowed = 1,
+ .suite = {
+ .sig = __VECS(ecdsa_nist_p384_tv_template)
+ }
+ }, {
+ .alg = "ecdsa-nist-p521",
+ .test = alg_test_sig,
+ .fips_allowed = 1,
.suite = {
- .akcipher = __VECS(ecdsa_nist_p384_tv_template)
+ .sig = __VECS(ecdsa_nist_p521_tv_template)
}
}, {
.alg = "ecrdsa",
- .test = alg_test_akcipher,
+ .test = alg_test_sig,
.suite = {
- .akcipher = __VECS(ecrdsa_tv_template)
+ .sig = __VECS(ecrdsa_tv_template)
}
}, {
.alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)",
+ .generic_driver = "essiv(authenc(hmac-sha256-lib,cbc(aes-generic)),sha256-lib)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -5053,6 +4889,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "essiv(cbc(aes),sha256)",
+ .generic_driver = "essiv(cbc(aes-generic),sha256-lib)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -5120,20 +4957,19 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ghash",
.test = alg_test_hash,
- .fips_allowed = 1,
.suite = {
.hash = __VECS(ghash_tv_template)
}
}, {
.alg = "hctr2(aes)",
- .generic_driver =
- "hctr2_base(xctr(aes-generic),polyval-generic)",
+ .generic_driver = "hctr2_base(xctr(aes-generic),polyval-lib)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_hctr2_tv_template)
}
}, {
.alg = "hmac(md5)",
+ .generic_driver = "hmac-md5-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_md5_tv_template)
@@ -5146,6 +4982,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha1)",
+ .generic_driver = "hmac-sha1-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5153,6 +4990,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha224)",
+ .generic_driver = "hmac-sha224-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5160,6 +4998,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha256)",
+ .generic_driver = "hmac-sha256-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5167,6 +5006,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-224)",
+ .generic_driver = "hmac(sha3-224-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5174,6 +5014,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-256)",
+ .generic_driver = "hmac(sha3-256-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5181,6 +5022,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-384)",
+ .generic_driver = "hmac(sha3-384-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5188,6 +5030,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha3-512)",
+ .generic_driver = "hmac(sha3-512-lib)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5195,6 +5038,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha384)",
+ .generic_driver = "hmac-sha384-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5202,6 +5046,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "hmac(sha512)",
+ .generic_driver = "hmac-sha512-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5230,12 +5075,9 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
- .alg = "kw(aes)",
- .test = alg_test_skcipher,
- .fips_allowed = 1,
- .suite = {
- .cipher = __VECS(aes_kw_tv_template)
- }
+ .alg = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .test = alg_test_aead,
+ .suite.aead = __VECS(krb5_test_camellia_cts_cmac)
+		.suite = {
+			.aead = __VECS(krb5_test_camellia_cts_cmac)
+		}
}, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
@@ -5319,6 +5161,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "md5",
+ .generic_driver = "md5-lib",
.test = alg_test_hash,
.suite = {
.hash = __VECS(md5_tv_template)
@@ -5336,25 +5179,23 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(nhpoly1305_tv_template)
}
}, {
- .alg = "ofb(aes)",
- .test = alg_test_skcipher,
+ .alg = "p1363(ecdsa-nist-p192)",
+ .test = alg_test_null,
+ }, {
+ .alg = "p1363(ecdsa-nist-p256)",
+ .test = alg_test_sig,
.fips_allowed = 1,
.suite = {
- .cipher = __VECS(aes_ofb_tv_template)
+ .sig = __VECS(p1363_ecdsa_nist_p256_tv_template)
}
}, {
- /* Same as ofb(aes) except the key is stored in
- * hardware secure memory which we reference by index
- */
- .alg = "ofb(paes)",
+ .alg = "p1363(ecdsa-nist-p384)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "ofb(sm4)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(sm4_ofb_tv_template)
- }
+ .alg = "p1363(ecdsa-nist-p521)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
}, {
.alg = "pcbc(fcrypt)",
.test = alg_test_skcipher,
@@ -5362,37 +5203,77 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(fcrypt_pcbc_tv_template)
}
}, {
- .alg = "pkcs1pad(rsa,sha224)",
- .test = alg_test_null,
+#if IS_ENABLED(CONFIG_CRYPTO_PHMAC_S390)
+ .alg = "phmac(sha224)",
+ .test = alg_test_hash,
.fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha224_tv_template)
+ }
}, {
- .alg = "pkcs1pad(rsa,sha256)",
- .test = alg_test_akcipher,
+ .alg = "phmac(sha256)",
+ .test = alg_test_hash,
.fips_allowed = 1,
.suite = {
- .akcipher = __VECS(pkcs1pad_rsa_tv_template)
+ .hash = __VECS(hmac_sha256_tv_template)
}
}, {
- .alg = "pkcs1pad(rsa,sha384)",
- .test = alg_test_null,
+ .alg = "phmac(sha384)",
+ .test = alg_test_hash,
.fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha384_tv_template)
+ }
}, {
- .alg = "pkcs1pad(rsa,sha512)",
- .test = alg_test_null,
+ .alg = "phmac(sha512)",
+ .test = alg_test_hash,
.fips_allowed = 1,
+ .suite = {
+ .hash = __VECS(hmac_sha512_tv_template)
+ }
}, {
- .alg = "poly1305",
- .test = alg_test_hash,
+#endif
+ .alg = "pkcs1(rsa,none)",
+ .test = alg_test_sig,
.suite = {
- .hash = __VECS(poly1305_tv_template)
+ .sig = __VECS(pkcs1_rsa_none_tv_template)
}
}, {
- .alg = "polyval",
- .test = alg_test_hash,
+ .alg = "pkcs1(rsa,sha224)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1(rsa,sha256)",
+ .test = alg_test_sig,
+ .fips_allowed = 1,
.suite = {
- .hash = __VECS(polyval_tv_template)
+ .sig = __VECS(pkcs1_rsa_tv_template)
}
}, {
+ .alg = "pkcs1(rsa,sha3-256)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1(rsa,sha3-384)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1(rsa,sha3-512)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1(rsa,sha384)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1(rsa,sha512)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1pad(rsa)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "rfc3686(ctr(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -5442,12 +5323,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc7539(chacha20,poly1305)",
+ .generic_driver = "rfc7539(chacha20-lib,poly1305-generic)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(rfc7539_tv_template)
}
}, {
.alg = "rfc7539esp(chacha20,poly1305)",
+ .generic_driver = "rfc7539esp(chacha20-lib,poly1305-generic)",
.test = alg_test_aead,
.suite = {
.aead = {
@@ -5471,6 +5354,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha1",
+ .generic_driver = "sha1-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5478,6 +5362,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha224",
+ .generic_driver = "sha224-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5485,6 +5370,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha256",
+ .generic_driver = "sha256-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5492,6 +5378,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-224",
+ .generic_driver = "sha3-224-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5499,6 +5386,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-256",
+ .generic_driver = "sha3-256-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5506,6 +5394,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-384",
+ .generic_driver = "sha3-384-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5513,6 +5402,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha3-512",
+ .generic_driver = "sha3-512-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5520,6 +5410,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha384",
+ .generic_driver = "sha384-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
@@ -5527,18 +5418,13 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "sha512",
+ .generic_driver = "sha512-lib",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha512_tv_template)
}
}, {
- .alg = "sm2",
- .test = alg_test_akcipher,
- .suite = {
- .akcipher = __VECS(sm2_tv_template)
- }
- }, {
.alg = "sm3",
.test = alg_test_hash,
.suite = {
@@ -5557,12 +5443,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(streebog512_tv_template)
}
}, {
- .alg = "vmac64(aes)",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(vmac64_aes_tv_template)
- }
- }, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
@@ -5581,19 +5461,54 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(wp512_tv_template)
}
}, {
+ .alg = "x962(ecdsa-nist-p192)",
+ .test = alg_test_sig,
+ .suite = {
+ .sig = __VECS(x962_ecdsa_nist_p192_tv_template)
+ }
+ }, {
+ .alg = "x962(ecdsa-nist-p256)",
+ .test = alg_test_sig,
+ .fips_allowed = 1,
+ .suite = {
+ .sig = __VECS(x962_ecdsa_nist_p256_tv_template)
+ }
+ }, {
+ .alg = "x962(ecdsa-nist-p384)",
+ .test = alg_test_sig,
+ .fips_allowed = 1,
+ .suite = {
+ .sig = __VECS(x962_ecdsa_nist_p384_tv_template)
+ }
+ }, {
+ .alg = "x962(ecdsa-nist-p521)",
+ .test = alg_test_sig,
+ .fips_allowed = 1,
+ .suite = {
+ .sig = __VECS(x962_ecdsa_nist_p521_tv_template)
+ }
+ }, {
.alg = "xcbc(aes)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_xcbc128_tv_template)
}
}, {
+ .alg = "xcbc(sm4)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(sm4_xcbc128_tv_template)
+ }
+ }, {
.alg = "xchacha12",
+ .generic_driver = "xchacha12-lib",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xchacha12_tv_template)
},
}, {
.alg = "xchacha20",
+ .generic_driver = "xchacha20-lib",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xchacha20_tv_template)
@@ -5641,6 +5556,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_xts_tv_template)
}
}, {
+ .alg = "xts(sm4)",
+ .generic_driver = "xts(ecb(sm4-generic))",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_xts_tv_template)
+ }
+ }, {
.alg = "xts(twofish)",
.generic_driver = "xts(ecb(twofish-generic))",
.test = alg_test_skcipher,
@@ -5657,14 +5579,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
#endif
- .alg = "xts4096(paes)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "xts512(paes)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "xxhash64",
.test = alg_test_hash,
.fips_allowed = 1,
@@ -5672,16 +5586,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(xxhash64_tv_template)
}
}, {
- .alg = "zlib-deflate",
- .test = alg_test_comp,
- .fips_allowed = 1,
- .suite = {
- .comp = {
- .comp = __VECS(zlib_deflate_comp_tv_template),
- .decomp = __VECS(zlib_deflate_decomp_tv_template)
- }
- }
- }, {
.alg = "zstd",
.test = alg_test_comp,
.fips_allowed = 1,
@@ -5733,9 +5637,8 @@ static void testmgr_onetime_init(void)
alg_check_test_descs_order();
alg_check_testvec_configs();
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n");
-#endif
+ if (!noslowtests)
+ pr_warn("alg: full crypto tests enabled. This is intended for developer use only.\n");
}
static int alg_find_test(const char *alg)
@@ -5824,11 +5727,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
test_done:
if (rc) {
- if (fips_enabled || panic_on_fail) {
+ if (fips_enabled) {
fips_fail_notify();
- panic("alg: self-tests for %s (%s) failed in %s mode!\n",
- driver, alg,
- fips_enabled ? "fips" : "panic_on_fail");
+ panic("alg: self-tests for %s (%s) failed in fips mode!\n",
+ driver, alg);
}
pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
@@ -5844,6 +5746,25 @@ test_done:
return rc;
notest:
+ if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_LSKCIPHER) {
+ char nalg[CRYPTO_MAX_ALG_NAME];
+
+ if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >=
+ sizeof(nalg))
+ goto notest2;
+
+ i = alg_find_test(nalg);
+ if (i < 0)
+ goto notest2;
+
+ if (fips_enabled && !alg_test_descs[i].fips_allowed)
+ goto non_fips_alg;
+
+ rc = alg_test_skcipher(alg_test_descs + i, driver, type, mask);
+ goto test_done;
+ }
+
+notest2:
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
if (type & CRYPTO_ALG_FIPS_INTERNAL)
@@ -5854,6 +5775,6 @@ non_fips_alg:
return alg_fips_disabled(driver, alg);
}
-#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+#endif /* CONFIG_CRYPTO_SELFTESTS */
EXPORT_SYMBOL_GPL(alg_test);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d6088e26f326..80bf5f1b67a6 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -21,6 +21,7 @@
#define _CRYPTO_TESTMGR_H
#include <linux/oid_registry.h>
+#include <crypto/internal/ecc.h>
#define MAX_IVLEN 32
@@ -58,8 +59,6 @@ struct hash_testvec {
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* ( e.g. test needs to fail due to a weak key )
* @fips_skip: Skip the test vector in FIPS mode
- * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
- * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
* @setkey_error: Expected error from setkey()
* @crypt_error: Expected error from encrypt() and decrypt()
*/
@@ -73,7 +72,6 @@ struct cipher_testvec {
unsigned short klen;
unsigned int len;
bool fips_skip;
- bool generates_iv;
int setkey_error;
int crypt_error;
};
@@ -121,18 +119,6 @@ struct aead_testvec {
int crypt_error;
};
-struct cprng_testvec {
- const char *key;
- const char *dt;
- const char *v;
- const char *result;
- unsigned char klen;
- unsigned short dtlen;
- unsigned short vlen;
- unsigned short rlen;
- unsigned short loops;
-};
-
struct drbg_testvec {
const unsigned char *entropy;
size_t entropylen;
@@ -150,6 +136,16 @@ struct drbg_testvec {
struct akcipher_testvec {
const unsigned char *key;
+ const unsigned char *m;
+ const unsigned char *c;
+ unsigned int key_len;
+ unsigned int m_size;
+ unsigned int c_size;
+ bool public_key_vec;
+};
+
+struct sig_testvec {
+ const unsigned char *key;
const unsigned char *params;
const unsigned char *m;
const unsigned char *c;
@@ -158,7 +154,6 @@ struct akcipher_testvec {
unsigned int m_size;
unsigned int c_size;
bool public_key_vec;
- bool siggen_sigver_test;
enum OID algo;
};
@@ -647,26 +642,713 @@ static const struct akcipher_testvec rsa_tv_template[] = {
}
};
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \
+ 0x##b1, 0x##b2, 0x##b3, 0x##b4, 0x##b5, 0x##b6, 0x##b7, 0x##b8
+#else
+#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \
+ 0x##b8, 0x##b7, 0x##b6, 0x##b5, 0x##b4, 0x##b3, 0x##b2, 0x##b1
+#endif
+
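A quick way to see what one be64_to_cpua() row denotes: on a little-endian host the byte list is reversed, so that read back as a CPU-order u64 it equals the big-endian value spelled out in the arguments. A userspace sketch (little-endian variant only; the row used is the first digit of the secp192r1(sha1) signature below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian expansion of the macro above (the #else branch). */
#define be64_to_cpua(b1, b2, b3, b4, b5, b6, b7, b8) \
	0x##b8, 0x##b7, 0x##b6, 0x##b5, 0x##b4, 0x##b3, 0x##b2, 0x##b1

int main(void)
{
	static const unsigned char d[8] = {
		be64_to_cpua(ad, 59, ad, 88, 27, d6, 92, 6b),
	};
	uint64_t v;

	memcpy(&v, d, sizeof(v));	/* read back in CPU order */
	printf("digit = 0x%016llx\n", (unsigned long long)v);
	/* prints digit = 0xad59ad8827d6926b on a little-endian host */
	return 0;
}
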
/*
* ECDSA test vectors.
*/
-static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
+static const struct sig_testvec ecdsa_nist_p192_tv_template[] = {
{
- .key =
+ .key = /* secp192r1(sha1) */
+ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+ "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+ "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+ "\x98",
+ .key_len = 49,
+ .m =
+ "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+ "\x63\x85\xe7\x82",
+ .m_size = 20,
+ .c = (const unsigned char[]){
+ be64_to_cpua(ad, 59, ad, 88, 27, d6, 92, 6b),
+ be64_to_cpua(a0, 27, 91, c6, f6, 7f, c3, 09),
+ be64_to_cpua(ba, e5, 93, 83, 6e, b6, 3b, 63),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(86, 80, 6f, a5, 79, 77, da, d0),
+ be64_to_cpua(ef, 95, 52, 7b, a0, 0f, e4, 18),
+ be64_to_cpua(10, 68, 01, 9d, ba, ce, 83, 08),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp192r1(sha224) */
+ "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
+ "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
+ "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
+ "\xa3",
+ .key_len = 49,
+ .m =
+ "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40"
+ "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11",
+ .m_size = 28,
+ .c = (const unsigned char[]){
+ be64_to_cpua(83, 7b, 12, e6, b6, 5b, cb, d4),
+ be64_to_cpua(14, f8, 11, 2b, 55, dc, ae, 37),
+ be64_to_cpua(5a, 8b, 82, 69, 7e, 8a, 0a, 09),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(a3, e3, 5c, 99, db, 92, 5b, 36),
+ be64_to_cpua(eb, c3, 92, 0f, 1e, 72, ee, c4),
+ be64_to_cpua(6a, 14, 4f, 53, 75, c8, 02, 48),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp192r1(sha256) */
+ "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae"
+ "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56"
+ "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58"
+ "\x91",
+ .key_len = 49,
+ .m =
+ "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd"
+ "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7",
+ .m_size = 32,
+ .c = (const unsigned char[]){
+ be64_to_cpua(01, 48, fb, 5f, 72, 2a, d4, 8f),
+ be64_to_cpua(6b, 1a, 58, 56, f1, 8f, f7, fd),
+ be64_to_cpua(3f, 72, 3f, 1f, 42, d2, 3f, 1d),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(7d, 3a, 97, d9, cd, 1a, 6a, 49),
+ be64_to_cpua(32, dd, 41, 74, 6a, 51, c7, d9),
+ be64_to_cpua(b3, 69, 43, fd, 48, 19, 86, cf),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp192r1(sha384) */
+ "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7"
+ "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f"
+ "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e"
+ "\x8b",
+ .key_len = 49,
+ .m =
+ "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9"
+ "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61"
+ "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78",
+ .m_size = 48,
+ .c = (const unsigned char[]){
+ be64_to_cpua(dd, 15, bb, d6, 8c, a7, 03, 78),
+ be64_to_cpua(cf, 7f, 34, b4, b4, e5, c5, 00),
+ be64_to_cpua(f0, a3, 38, ce, 2b, f8, 9d, 1a),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(93, 12, 3b, 3b, 28, fb, 6d, e1),
+ be64_to_cpua(d1, 01, 77, 44, 5d, 53, a4, 7c),
+ be64_to_cpua(64, bc, 5a, 1f, 82, 96, 61, d7),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp192r1(sha512) */
+ "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea"
+ "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d"
+ "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11"
+ "\x57",
+ .key_len = 49,
+ .m =
+ "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23"
+ "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67"
+ "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70"
+ "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5",
+ .m_size = 64,
+ .c = (const unsigned char[]){
+ be64_to_cpua(2b, 11, 2d, 1c, b6, 06, c9, 6c),
+ be64_to_cpua(dd, 3f, 07, 87, 12, a0, d4, ac),
+ be64_to_cpua(88, 5b, 8f, 59, 43, bf, cf, c6),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(28, 6a, df, 97, fd, 82, 76, 24),
+ be64_to_cpua(a9, 14, 2a, 5e, f5, e5, fb, 72),
+ be64_to_cpua(73, b4, 22, 9a, 98, 73, 3c, 83),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+};
+
+static const struct sig_testvec ecdsa_nist_p256_tv_template[] = {
+ {
+ .key = /* secp256r1(sha1) */
+ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+ "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+ "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+ "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+ "\xaf",
+ .key_len = 65,
+ .m =
+ "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+ "\x0b\xde\x6a\x42",
+ .m_size = 20,
+ .c = (const unsigned char[]){
+ be64_to_cpua(ee, ca, 6a, 52, 0e, 48, 4d, cc),
+ be64_to_cpua(f7, d4, ad, 8d, 94, 5a, 69, 89),
+ be64_to_cpua(cf, d4, e7, b7, f0, 82, 56, 41),
+ be64_to_cpua(f9, 25, ce, 9f, 3a, a6, 35, 81),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(fb, 9d, 8b, de, d4, 8d, 6f, ad),
+ be64_to_cpua(f1, 03, 03, f3, 3b, e2, 73, f7),
+ be64_to_cpua(8a, fa, 54, 93, 29, a7, 70, 86),
+ be64_to_cpua(d7, e4, ef, 52, 66, d3, 5b, 9d),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp256r1(sha224) */
+ "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
+ "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
+ "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
+ "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0"
+ "\xd4",
+ .key_len = 65,
+ .m =
+ "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41"
+ "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0",
+ .m_size = 28,
+ .c = (const unsigned char[]){
+ be64_to_cpua(7d, 25, d8, 25, f5, 81, d2, 1e),
+ be64_to_cpua(34, 62, 79, cb, 6a, 91, 67, 2e),
+ be64_to_cpua(ae, ce, 77, 59, 1a, db, 59, d5),
+ be64_to_cpua(20, 43, fa, c0, 9f, 9d, 7b, e7),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(ce, d5, 2e, 8b, de, 5a, 04, 0e),
+ be64_to_cpua(bf, 50, 05, 58, 39, 0e, 26, 92),
+ be64_to_cpua(76, 20, 4a, 77, 22, ec, c8, 66),
+ be64_to_cpua(5f, f8, 74, f8, 57, d0, 5e, 54),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp256r1(sha256) */
+ "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f"
+ "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00"
+ "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9"
+ "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e"
+ "\xb8",
+ .key_len = 65,
+ .m =
+ "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b"
+ "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4",
+ .m_size = 32,
+ .c = (const unsigned char[]){
+ be64_to_cpua(91, dc, 02, 67, dc, 0c, d0, 82),
+ be64_to_cpua(ac, 44, c3, e8, 24, 11, 2d, a4),
+ be64_to_cpua(09, dc, 29, 63, a8, 1a, ad, fc),
+ be64_to_cpua(08, 31, fa, 74, 0d, 1d, 21, 5d),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(4f, 2a, 65, 35, 23, e3, 1d, fa),
+ be64_to_cpua(0a, 6e, 1b, c4, af, e1, 83, c3),
+ be64_to_cpua(f9, a9, 81, ac, 4a, 50, d0, 91),
+ be64_to_cpua(bd, ff, ce, ee, 42, c3, 97, ff),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp256r1(sha384) */
+ "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d"
+ "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e"
+ "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00"
+ "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2"
+ "\x7c",
+ .key_len = 65,
+ .m =
+ "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6"
+ "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94"
+ "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb",
+ .m_size = 48,
+ .c = (const unsigned char[]){
+ be64_to_cpua(f2, e4, 6c, c7, 94, b1, d5, fe),
+ be64_to_cpua(08, b2, 6b, 24, 94, 48, 46, 5e),
+ be64_to_cpua(d0, 2e, 95, 54, d1, 95, 64, 93),
+ be64_to_cpua(8e, f3, 6f, dc, f8, 69, a6, 2e),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(c0, 60, 11, 92, dc, 17, 89, 12),
+ be64_to_cpua(69, f4, 3b, 4f, 47, cf, 9b, 16),
+ be64_to_cpua(19, fb, 5f, 92, f4, c9, 23, 37),
+ be64_to_cpua(eb, a7, 80, 26, dc, f9, 3a, 44),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp256r1(sha512) */
+ "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a"
+ "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38"
+ "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b"
+ "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92"
+ "\xbf",
+ .key_len = 65,
+ .m =
+ "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9"
+ "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca"
+ "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf"
+ "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6",
+ .m_size = 64,
+ .c = (const unsigned char[]){
+ be64_to_cpua(76, f6, 04, 99, 09, 37, 4d, fa),
+ be64_to_cpua(ed, 8c, 73, 30, 6c, 22, b3, 97),
+ be64_to_cpua(40, ea, 44, 81, 00, 4e, 29, 08),
+ be64_to_cpua(b8, 6d, 87, 81, 43, df, fb, 9f),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(76, 31, 79, 4a, e9, 81, 6a, ee),
+ be64_to_cpua(5c, ad, c3, 78, 1c, c2, c1, 19),
+ be64_to_cpua(f8, 00, dd, ab, d4, c0, 2b, e6),
+ be64_to_cpua(1e, b9, 75, 31, f6, 04, a5, 4d),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+};
+
+static const struct sig_testvec ecdsa_nist_p384_tv_template[] = {
+ {
+ .key = /* secp384r1(sha1) */
+ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+ "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+ "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+ "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+ "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+ "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+ "\xf1",
+ .key_len = 97,
+ .m =
+ "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+ "\x3a\x69\xc1\x93",
+ .m_size = 20,
+ .c = (const unsigned char[]){
+ be64_to_cpua(ec, 7c, 7e, d0, 87, d7, d7, 6e),
+ be64_to_cpua(78, f1, 4c, 26, e6, 5b, 86, cf),
+ be64_to_cpua(3a, c6, f1, 32, 3c, ce, 70, 2b),
+ be64_to_cpua(8d, 26, 8e, ae, 63, 3f, bc, 20),
+ be64_to_cpua(57, 55, 07, 20, 43, 30, de, a0),
+ be64_to_cpua(f5, 0f, 24, 4c, 07, 93, 6f, 21),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(79, 12, 2a, b7, c5, 15, 92, c5),
+ be64_to_cpua(4a, a1, 59, f1, 1c, a4, 58, 26),
+ be64_to_cpua(74, a0, 0f, bf, af, c3, 36, 76),
+ be64_to_cpua(df, 28, 8c, 1b, fa, f9, 95, 88),
+ be64_to_cpua(5f, 63, b1, be, 5e, 4c, 0e, a1),
+ be64_to_cpua(cd, bb, 7e, 81, 5d, 8f, 63, c0),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp384r1(sha224) */
+ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
+ "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
+ "\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05"
+ "\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc"
+ "\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50"
+ "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d"
+ "\xe0",
+ .key_len = 97,
+ .m =
+ "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd"
+ "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5",
+ .m_size = 28,
+ .c = (const unsigned char[]){
+ be64_to_cpua(3f, dd, 15, 1b, 68, 2b, 9d, 8b),
+ be64_to_cpua(c9, 9c, 11, b8, 10, 01, c5, 41),
+ be64_to_cpua(c5, da, b4, e3, 93, 07, e0, 99),
+ be64_to_cpua(97, f1, c8, 72, 26, cf, 5a, 5e),
+ be64_to_cpua(ec, cb, e4, 89, 47, b2, f7, bc),
+ be64_to_cpua(8a, 51, 84, ce, 13, 1e, d2, dc),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(88, 2b, 82, 26, 5e, 1c, da, fb),
+ be64_to_cpua(9f, 19, d0, 42, 8b, 93, c2, 11),
+ be64_to_cpua(4d, d0, c6, 6e, b0, e9, fc, 14),
+ be64_to_cpua(df, d8, 68, a2, 64, 42, 65, f3),
+ be64_to_cpua(4b, 00, 08, 31, 6c, f5, d5, f6),
+ be64_to_cpua(8b, 03, 2c, fc, 1f, d1, a9, a4),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp384r1(sha256) */
+ "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a"
+ "\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a"
+ "\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e"
+ "\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4"
+ "\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52"
+ "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc"
+ "\xab",
+ .key_len = 97,
+ .m =
+ "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3"
+ "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2",
+ .m_size = 32,
+ .c = (const unsigned char[]){
+ be64_to_cpua(c8, 8d, 2c, 79, 3a, 8e, 32, c4),
+ be64_to_cpua(b6, c6, fc, 70, 2e, 66, 3c, 77),
+ be64_to_cpua(af, 06, 3f, 84, 04, e2, f9, 67),
+ be64_to_cpua(cc, 47, 53, 87, bc, bd, 83, 3f),
+ be64_to_cpua(8e, 3f, 7e, ce, 0a, 9b, aa, 59),
+ be64_to_cpua(08, 09, 12, 9d, 6e, 96, 64, a6),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(10, 0e, f4, 1f, 39, ca, 4d, 43),
+ be64_to_cpua(4f, 8d, de, 1e, 93, 8d, 95, bb),
+ be64_to_cpua(15, 68, c0, 75, 3e, 23, 5e, 36),
+ be64_to_cpua(dd, ce, bc, b2, 97, f4, 9c, f3),
+ be64_to_cpua(26, a2, b0, 89, 42, 0a, da, d9),
+ be64_to_cpua(40, 34, b8, 90, a9, 80, ab, 47),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp384r1(sha384) */
+ "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f"
+ "\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9"
+ "\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c"
+ "\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a"
+ "\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9"
+ "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89"
+ "\x9e",
+ .key_len = 97,
+ .m =
+ "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf"
+ "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1"
+ "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff",
+ .m_size = 48,
+ .c = (const unsigned char[]){
+ be64_to_cpua(a2, a4, c8, f2, ea, 9d, 11, 1f),
+ be64_to_cpua(3b, 1f, 07, 8f, 15, 02, fe, 1d),
+ be64_to_cpua(29, e6, fb, ca, 8c, d6, b6, b4),
+ be64_to_cpua(2d, 7a, 91, 5f, 49, 2d, 22, 08),
+ be64_to_cpua(ee, 2e, 62, 35, 46, fa, 00, d8),
+ be64_to_cpua(9b, 28, 68, c0, a1, ea, 8c, 50),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(ab, 8d, 4e, de, e6, 6d, 9b, 66),
+ be64_to_cpua(96, 17, 04, c9, 05, 77, f1, 8e),
+ be64_to_cpua(44, 92, 8c, 86, 99, 65, b3, 97),
+ be64_to_cpua(71, cd, 8f, 18, 99, f0, 0f, 13),
+ be64_to_cpua(bf, e3, 75, 24, 49, ac, fb, c8),
+ be64_to_cpua(fc, 50, f6, 43, bd, 50, 82, 0e),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ }, {
+ .key = /* secp384r1(sha512) */
+ "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46"
+ "\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4"
+ "\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95"
+ "\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe"
+ "\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa"
+ "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac"
+ "\xa3",
+ .key_len = 97,
+ .m =
+ "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1"
+ "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71"
+ "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc"
+ "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e",
+ .m_size = 64,
+ .c = (const unsigned char[]){
+ be64_to_cpua(3e, b3, c7, a8, b3, 17, 77, d1),
+ be64_to_cpua(dc, 2b, 43, 0e, 6a, b3, 53, 6f),
+ be64_to_cpua(4c, fc, 6f, 80, e3, af, b3, d9),
+ be64_to_cpua(9a, 02, de, 93, e8, 83, e4, 84),
+ be64_to_cpua(4d, c6, ef, da, 02, e7, 0f, 52),
+ be64_to_cpua(00, 1d, 20, 94, 77, fe, 31, fa),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(4e, 45, cf, 3c, 93, ff, 50, 5d),
+ be64_to_cpua(34, e4, 8b, 80, a5, b6, da, 2c),
+ be64_to_cpua(c4, 6a, 03, 5f, 8d, 7a, f9, fb),
+ be64_to_cpua(ec, 63, e3, 0c, ec, 50, dc, cc),
+ be64_to_cpua(de, 3a, 3d, 16, af, b4, 52, 6a),
+ be64_to_cpua(63, f6, f0, 3d, 5f, 5f, 99, 3f),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 00) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+};
+
+static const struct sig_testvec ecdsa_nist_p521_tv_template[] = {
+ {
+ .key = /* secp521r1(sha224) */
+ "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0"
+ "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7"
+ "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61"
+ "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb"
+ "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a"
+ "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76"
+ "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47"
+ "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f"
+ "\x3b\x83\x82\x2f\x14",
+ .key_len = 133,
+ .m =
+ "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4"
+ "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb",
+ .m_size = 28,
+ .c = (const unsigned char[]){
+ be64_to_cpua(46, 6b, c7, af, 7a, b9, 19, 0a),
+ be64_to_cpua(6c, a6, 9b, 89, 8b, 1e, fd, 09),
+ be64_to_cpua(98, 85, 29, 88, ff, 0b, 94, 94),
+ be64_to_cpua(18, c6, 37, 8a, cb, a7, d8, 7d),
+ be64_to_cpua(f8, 3f, 59, 0f, 74, f0, 3f, d8),
+ be64_to_cpua(e2, ef, 07, 92, ee, 60, 94, 06),
+ be64_to_cpua(35, f6, dc, 6d, 02, 7b, 22, ac),
+ be64_to_cpua(d6, 43, e7, ff, 42, b2, ba, 74),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 01),
+ be64_to_cpua(50, b1, a5, 98, 92, 2a, a5, 52),
+ be64_to_cpua(1c, ad, 22, da, 82, 00, 35, a3),
+ be64_to_cpua(0e, 64, cc, c4, e8, 43, d9, 0e),
+ be64_to_cpua(30, 90, 0f, 1c, 8f, 78, d3, 9f),
+ be64_to_cpua(26, 0b, 5f, 49, 32, 6b, 91, 99),
+ be64_to_cpua(0f, f8, 65, 97, 6b, 09, 4d, 22),
+ be64_to_cpua(5e, f9, 88, f3, d2, 32, 90, 57),
+ be64_to_cpua(26, 0d, 55, cd, 23, 1e, 7d, a0),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 3a) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+ {
+ .key = /* secp521r1(sha256) */
+ "\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae"
+ "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14"
+ "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36"
+ "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81"
+ "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c"
+ "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09"
+ "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05"
+ "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3"
+ "\xe6\x77\x1e\x1f\x8a",
+ .key_len = 133,
+ .m =
+ "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39"
+ "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73",
+ .m_size = 32,
+ .c = (const unsigned char[]){
+ be64_to_cpua(de, 7e, d7, 59, 10, e9, d9, d5),
+ be64_to_cpua(38, 1f, 46, 0b, 04, 64, 34, 79),
+ be64_to_cpua(ae, ce, 54, 76, 9a, c2, 8f, b8),
+ be64_to_cpua(95, 35, 6f, 02, 0e, af, e1, 4c),
+ be64_to_cpua(56, 3c, f6, f0, d8, e1, b7, 5d),
+ be64_to_cpua(50, 9f, 7d, 1f, ca, 8b, a8, 2d),
+ be64_to_cpua(06, 0f, fd, 83, fc, 0e, d9, ce),
+ be64_to_cpua(a5, 5f, 57, 52, 27, 78, 3a, b5),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, cd),
+ be64_to_cpua(55, 38, b6, f6, 34, 65, c7, bd),
+ be64_to_cpua(1c, 57, 56, 8f, 12, b7, 1d, 91),
+ be64_to_cpua(03, 42, 02, 5f, 50, f0, a2, 0d),
+ be64_to_cpua(fa, 10, dd, 9b, fb, 36, 1a, 31),
+ be64_to_cpua(e0, 87, 2c, 44, 4b, 5a, ee, af),
+ be64_to_cpua(a9, 79, 24, b9, 37, 35, dd, a0),
+ be64_to_cpua(6b, 35, ae, 65, b5, 99, 12, 0a),
+ be64_to_cpua(50, 85, 38, f9, 15, 83, 18, 04),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 01, cf) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+ {
+ .key = /* secp521r1(sha384) */
+ "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b"
+ "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33"
+ "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4"
+ "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c"
+ "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47"
+ "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14"
+ "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46"
+ "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39"
+ "\xb3\x3b\x5b\x1b\x94",
+ .key_len = 133,
+ .m =
+ "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26"
+ "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59"
+ "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76",
+ .m_size = 48,
+ .c = (const unsigned char[]){
+ be64_to_cpua(b8, 6a, dd, fb, e6, 63, 4e, 28),
+ be64_to_cpua(84, 59, fd, 1a, c4, 40, dd, 43),
+ be64_to_cpua(32, 76, 06, d0, f9, c0, e4, e6),
+ be64_to_cpua(e4, df, 9b, 7d, 9e, 47, ca, 33),
+ be64_to_cpua(7e, 42, 71, 86, 57, 2d, f1, 7d),
+ be64_to_cpua(f2, 4b, 64, 98, f7, ec, da, c7),
+ be64_to_cpua(ec, 51, dc, e8, 35, 5e, ae, 16),
+ be64_to_cpua(96, 76, 3c, 27, ea, aa, 9c, 26),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, 93),
+ be64_to_cpua(c6, 4f, ab, 2b, 62, c1, 42, b1),
+ be64_to_cpua(e5, 5a, 94, 56, cf, 8f, b4, 22),
+ be64_to_cpua(6a, c3, f3, 7a, d1, fa, e7, a7),
+ be64_to_cpua(df, c4, c0, db, 54, db, 8a, 0d),
+ be64_to_cpua(da, a7, cd, 26, 28, 76, 3b, 52),
+ be64_to_cpua(e4, 3c, bc, 93, 65, 57, 1c, 30),
+ be64_to_cpua(55, ce, 37, 97, c9, 05, 51, e5),
+ be64_to_cpua(c3, 6a, 87, 6e, b5, 13, 1f, 20),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 00, ff) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+ {
+ .key = /* secp521r1(sha512) */
+ "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f"
+ "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a"
+ "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f"
+ "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee"
+ "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e"
+ "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6"
+ "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77"
+ "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae"
+ "\xd0\x17\xdf\x49\x6a",
+ .key_len = 133,
+ .m =
+ "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69"
+ "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed"
+ "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3"
+ "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68",
+ .m_size = 64,
+ .c = (const unsigned char[]){
+ be64_to_cpua(28, b5, 04, b0, b6, 33, 1c, 7e),
+ be64_to_cpua(80, a6, 13, fc, b6, 90, f7, bb),
+ be64_to_cpua(27, 93, e8, 6c, 49, 7d, 28, fc),
+ be64_to_cpua(1f, 12, 3e, b7, 7e, 51, ff, 7f),
+ be64_to_cpua(fb, 62, 1e, 42, 03, 6c, 74, 8a),
+ be64_to_cpua(63, 0e, 02, cc, 94, a9, 05, b9),
+ be64_to_cpua(aa, 86, ec, a8, 05, 03, 52, 56),
+ be64_to_cpua(71, 86, 96, ac, 21, 33, 7e, 4e),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 01, 5c),
+ be64_to_cpua(46, 1e, 77, 44, 78, e0, d1, 04),
+ be64_to_cpua(72, 74, 13, 63, 39, a6, e5, 25),
+ be64_to_cpua(00, 55, bb, 6a, b4, 73, 00, d2),
+ be64_to_cpua(71, d0, e9, ca, a7, c0, cb, aa),
+ be64_to_cpua(7a, 76, 37, 51, 47, 49, 98, 12),
+ be64_to_cpua(88, 05, 3e, 43, 39, 01, bd, b7),
+ be64_to_cpua(95, 35, 89, 4f, 41, 5f, 9e, 19),
+ be64_to_cpua(43, 52, 1d, e3, c6, bd, 5a, 40),
+ be64_to_cpua(00, 00, 00, 00, 00, 00, 01, 70) },
+ .c_size = ECC_MAX_BYTES * 2,
+ .public_key_vec = true,
+ },
+};
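
In these raw-format vectors, "c" carries r followed by s, each stored least-significant u64 digit first and zero-padded to ECC_MAX_BYTES, which is why the smaller curves end each half in all-zero rows and every entry has .c_size = ECC_MAX_BYTES * 2. A sketch that prints one half back out as a big-endian number; it assumes a little-endian host and ECC_MAX_BYTES == 72 (nine digits, sized for P-521 -- inferred from the 18 rows per signature above, not quoted from a kernel header):

#include <stdio.h>

#define ECC_MAX_BYTES 72	/* assumed: 9 u64 digits, sized for P-521 */

/* Print one signature half (r or s) as a big-endian hex number. */
static void print_half(const unsigned char *half)
{
	/*
	 * Digits are least-significant first and, on a little-endian host,
	 * so are the bytes inside each digit: the buffer is simply the
	 * number in little-endian byte order, so walk it backwards.
	 */
	for (int i = ECC_MAX_BYTES - 1; i >= 0; i--)
		printf("%02x", half[i]);
	putchar('\n');
}

int main(void)
{
	/* Low digit of r from the secp192r1(sha1) vector above; rest zero. */
	static const unsigned char c[ECC_MAX_BYTES * 2] = {
		0x6b, 0x92, 0xd6, 0x27, 0x88, 0xad, 0x59, 0xad,
	};

	print_half(c);			/* r */
	print_half(c + ECC_MAX_BYTES);	/* s (all zero here) */
	return 0;
}
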
+
+/*
+ * ECDSA X9.62 test vectors.
+ *
+ * Identical to the ECDSA test vectors, except the signature in "c" is X9.62-encoded.
+ */
+static const struct sig_testvec x962_ecdsa_nist_p192_tv_template[] = {
+ {
+ .key = /* secp192r1(sha1) */
"\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
"\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
"\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
"\x98",
.key_len = 49,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x01",
- .param_len = 21,
.m =
"\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
"\x63\x85\xe7\x82",
.m_size = 20,
- .algo = OID_id_ecdsa_with_sha1,
.c =
"\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
"\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
@@ -674,23 +1356,17 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
"\x80\x6f\xa5\x79\x77\xda\xd0",
.c_size = 55,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp192r1(sha224) */
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
"\xa3",
.key_len = 49,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x01",
- .param_len = 21,
.m =
"\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40"
"\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11",
.m_size = 28,
- .algo = OID_id_ecdsa_with_sha224,
.c =
"\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b"
"\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14"
@@ -698,23 +1374,17 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
"\x5c\x99\xdb\x92\x5b\x36",
.c_size = 54,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp192r1(sha256) */
"\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae"
"\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56"
"\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58"
"\x91",
.key_len = 49,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x01",
- .param_len = 21,
.m =
"\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd"
"\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7",
.m_size = 32,
- .algo = OID_id_ecdsa_with_sha256,
.c =
"\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56"
"\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3"
@@ -722,24 +1392,18 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
"\x3a\x97\xd9\xcd\x1a\x6a\x49",
.c_size = 55,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp192r1(sha384) */
"\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7"
"\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f"
"\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e"
"\x8b",
.key_len = 49,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x01",
- .param_len = 21,
.m =
"\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9"
"\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61"
"\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78",
.m_size = 48,
- .algo = OID_id_ecdsa_with_sha384,
.c =
"\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34"
"\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64"
@@ -747,25 +1411,19 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
"\x12\x3b\x3b\x28\xfb\x6d\xe1",
.c_size = 55,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp192r1(sha512) */
"\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea"
"\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d"
"\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11"
"\x57",
.key_len = 49,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x01",
- .param_len = 21,
.m =
"\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23"
"\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67"
"\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70"
"\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5",
.m_size = 64,
- .algo = OID_id_ecdsa_with_sha512,
.c =
"\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07"
"\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73"
@@ -773,28 +1431,22 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
"\x6a\xdf\x97\xfd\x82\x76\x24",
.c_size = 55,
.public_key_vec = true,
- .siggen_sigver_test = true,
},
};
-static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
+static const struct sig_testvec x962_ecdsa_nist_p256_tv_template[] = {
{
- .key =
+ .key = /* secp256r1(sha1) */
"\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
"\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
"\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
"\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
"\xaf",
.key_len = 65,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x07",
- .param_len = 21,
.m =
"\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
"\x0b\xde\x6a\x42",
.m_size = 20,
- .algo = OID_id_ecdsa_with_sha1,
.c =
"\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
"\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
@@ -803,24 +1455,18 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
"\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
.c_size = 72,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp256r1(sha224) */
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
"\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0"
"\xd4",
.key_len = 65,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x07",
- .param_len = 21,
.m =
"\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41"
"\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0",
.m_size = 28,
- .algo = OID_id_ecdsa_with_sha224,
.c =
"\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59"
"\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25"
@@ -829,24 +1475,18 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
"\x2e\x8b\xde\x5a\x04\x0e",
.c_size = 70,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp256r1(sha256) */
"\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f"
"\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00"
"\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9"
"\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e"
"\xb8",
.key_len = 65,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x07",
- .param_len = 21,
.m =
"\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b"
"\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4",
.m_size = 32,
- .algo = OID_id_ecdsa_with_sha256,
.c =
"\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63"
"\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67"
@@ -855,25 +1495,19 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
"\x2a\x65\x35\x23\xe3\x1d\xfa",
.c_size = 71,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp256r1(sha384) */
"\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d"
"\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e"
"\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00"
"\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2"
"\x7c",
.key_len = 65,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x07",
- .param_len = 21,
.m =
"\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6"
"\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94"
"\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb",
.m_size = 48,
- .algo = OID_id_ecdsa_with_sha384,
.c =
"\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95"
"\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c"
@@ -882,26 +1516,20 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
"\xc0\x60\x11\x92\xdc\x17\x89\x12",
.c_size = 72,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
- .key =
+ .key = /* secp256r1(sha512) */
"\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a"
"\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38"
"\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b"
"\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92"
"\xbf",
.key_len = 65,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x07",
- .param_len = 21,
.m =
"\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9"
"\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca"
"\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf"
"\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6",
.m_size = 64,
- .algo = OID_id_ecdsa_with_sha512,
.c =
"\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44"
"\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04"
@@ -910,11 +1538,10 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
"\x31\x79\x4a\xe9\x81\x6a\xee",
.c_size = 71,
.public_key_vec = true,
- .siggen_sigver_test = true,
},
};
-static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
+static const struct sig_testvec x962_ecdsa_nist_p384_tv_template[] = {
{
.key = /* secp384r1(sha1) */
"\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
@@ -925,15 +1552,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
"\xf1",
.key_len = 97,
- .params =
- "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
- "\x00\x22",
- .param_len = 18,
.m =
"\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
"\x3a\x69\xc1\x93",
.m_size = 20,
- .algo = OID_id_ecdsa_with_sha1,
.c =
"\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
"\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
@@ -944,7 +1566,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
.c_size = 104,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
@@ -955,15 +1576,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d"
"\xe0",
.key_len = 97,
- .params =
- "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
- "\x00\x22",
- .param_len = 18,
.m =
"\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd"
"\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5",
.m_size = 28,
- .algo = OID_id_ecdsa_with_sha224,
.c =
"\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4"
"\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4"
@@ -974,7 +1590,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x88\x2b\x82\x26\x5e\x1c\xda\xfb",
.c_size = 104,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha256) */
"\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a"
@@ -985,15 +1600,10 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc"
"\xab",
.key_len = 97,
- .params =
- "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
- "\x00\x22",
- .param_len = 18,
.m =
"\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3"
"\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2",
.m_size = 32,
- .algo = OID_id_ecdsa_with_sha256,
.c =
"\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce"
"\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84"
@@ -1004,7 +1614,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\xf4\x1f\x39\xca\x4d\x43",
.c_size = 102,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha384) */
"\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f"
@@ -1015,16 +1624,11 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89"
"\x9e",
.key_len = 97,
- .params =
- "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
- "\x00\x22",
- .param_len = 18,
.m =
"\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf"
"\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1"
"\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff",
.m_size = 48,
- .algo = OID_id_ecdsa_with_sha384,
.c =
"\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62"
"\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb"
@@ -1035,7 +1639,6 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\xab\x8d\x4e\xde\xe6\x6d\x9b\x66",
.c_size = 104,
.public_key_vec = true,
- .siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha512) */
"\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46"
@@ -1046,17 +1649,12 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac"
"\xa3",
.key_len = 97,
- .params =
- "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
- "\x00\x22",
- .param_len = 18,
.m =
"\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1"
"\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71"
"\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc"
"\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e",
.m_size = 64,
- .algo = OID_id_ecdsa_with_sha512,
.c =
"\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02"
"\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3"
@@ -1067,14 +1665,163 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
"\x3c\x93\xff\x50\x5d",
.c_size = 101,
.public_key_vec = true,
- .siggen_sigver_test = true,
+ },
+};
+
+static const struct sig_testvec x962_ecdsa_nist_p521_tv_template[] = {
+ {
+ .key = /* secp521r1(sha224) */
+ "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0"
+ "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7"
+ "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61"
+ "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb"
+ "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a"
+ "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76"
+ "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47"
+ "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f"
+ "\x3b\x83\x82\x2f\x14",
+ .key_len = 133,
+ .m =
+ "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4"
+ "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb",
+ .m_size = 28,
+ .c =
+ "\x30\x81\x86\x02\x41\x01\xd6\x43\xe7\xff\x42\xb2\xba\x74\x35\xf6"
+ "\xdc\x6d\x02\x7b\x22\xac\xe2\xef\x07\x92\xee\x60\x94\x06\xf8\x3f"
+ "\x59\x0f\x74\xf0\x3f\xd8\x18\xc6\x37\x8a\xcb\xa7\xd8\x7d\x98\x85"
+ "\x29\x88\xff\x0b\x94\x94\x6c\xa6\x9b\x89\x8b\x1e\xfd\x09\x46\x6b"
+ "\xc7\xaf\x7a\xb9\x19\x0a\x02\x41\x3a\x26\x0d\x55\xcd\x23\x1e\x7d"
+ "\xa0\x5e\xf9\x88\xf3\xd2\x32\x90\x57\x0f\xf8\x65\x97\x6b\x09\x4d"
+ "\x22\x26\x0b\x5f\x49\x32\x6b\x91\x99\x30\x90\x0f\x1c\x8f\x78\xd3"
+ "\x9f\x0e\x64\xcc\xc4\xe8\x43\xd9\x0e\x1c\xad\x22\xda\x82\x00\x35"
+ "\xa3\x50\xb1\xa5\x98\x92\x2a\xa5\x52",
+ .c_size = 137,
+ .public_key_vec = true,
+ },
+ {
+ .key = /* secp521r1(sha256) */
+ "\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae"
+ "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14"
+ "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36"
+ "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81"
+ "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c"
+ "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09"
+ "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05"
+ "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3"
+ "\xe6\x77\x1e\x1f\x8a",
+ .key_len = 133,
+ .m =
+ "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39"
+ "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73",
+ .m_size = 32,
+ .c =
+ "\x30\x81\x88\x02\x42\x00\xcd\xa5\x5f\x57\x52\x27\x78\x3a\xb5\x06"
+ "\x0f\xfd\x83\xfc\x0e\xd9\xce\x50\x9f\x7d\x1f\xca\x8b\xa8\x2d\x56"
+ "\x3c\xf6\xf0\xd8\xe1\xb7\x5d\x95\x35\x6f\x02\x0e\xaf\xe1\x4c\xae"
+ "\xce\x54\x76\x9a\xc2\x8f\xb8\x38\x1f\x46\x0b\x04\x64\x34\x79\xde"
+ "\x7e\xd7\x59\x10\xe9\xd9\xd5\x02\x42\x01\xcf\x50\x85\x38\xf9\x15"
+ "\x83\x18\x04\x6b\x35\xae\x65\xb5\x99\x12\x0a\xa9\x79\x24\xb9\x37"
+ "\x35\xdd\xa0\xe0\x87\x2c\x44\x4b\x5a\xee\xaf\xfa\x10\xdd\x9b\xfb"
+ "\x36\x1a\x31\x03\x42\x02\x5f\x50\xf0\xa2\x0d\x1c\x57\x56\x8f\x12"
+ "\xb7\x1d\x91\x55\x38\xb6\xf6\x34\x65\xc7\xbd",
+ .c_size = 139,
+ .public_key_vec = true,
+ },
+ {
+ .key = /* secp521r1(sha384) */
+ "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b"
+ "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33"
+ "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4"
+ "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c"
+ "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47"
+ "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14"
+ "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46"
+ "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39"
+ "\xb3\x3b\x5b\x1b\x94",
+ .key_len = 133,
+ .m =
+ "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26"
+ "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59"
+ "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76",
+ .m_size = 48,
+ .c =
+ "\x30\x81\x88\x02\x42\x00\x93\x96\x76\x3c\x27\xea\xaa\x9c\x26\xec"
+ "\x51\xdc\xe8\x35\x5e\xae\x16\xf2\x4b\x64\x98\xf7\xec\xda\xc7\x7e"
+ "\x42\x71\x86\x57\x2d\xf1\x7d\xe4\xdf\x9b\x7d\x9e\x47\xca\x33\x32"
+ "\x76\x06\xd0\xf9\xc0\xe4\xe6\x84\x59\xfd\x1a\xc4\x40\xdd\x43\xb8"
+ "\x6a\xdd\xfb\xe6\x63\x4e\x28\x02\x42\x00\xff\xc3\x6a\x87\x6e\xb5"
+ "\x13\x1f\x20\x55\xce\x37\x97\xc9\x05\x51\xe5\xe4\x3c\xbc\x93\x65"
+ "\x57\x1c\x30\xda\xa7\xcd\x26\x28\x76\x3b\x52\xdf\xc4\xc0\xdb\x54"
+ "\xdb\x8a\x0d\x6a\xc3\xf3\x7a\xd1\xfa\xe7\xa7\xe5\x5a\x94\x56\xcf"
+ "\x8f\xb4\x22\xc6\x4f\xab\x2b\x62\xc1\x42\xb1",
+ .c_size = 139,
+ .public_key_vec = true,
+ },
+ {
+ .key = /* secp521r1(sha512) */
+ "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f"
+ "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a"
+ "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f"
+ "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee"
+ "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e"
+ "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6"
+ "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77"
+ "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae"
+ "\xd0\x17\xdf\x49\x6a",
+ .key_len = 133,
+ .m =
+ "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69"
+ "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed"
+ "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3"
+ "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68",
+ .m_size = 64,
+ .c =
+ "\x30\x81\x88\x02\x42\x01\x5c\x71\x86\x96\xac\x21\x33\x7e\x4e\xaa"
+ "\x86\xec\xa8\x05\x03\x52\x56\x63\x0e\x02\xcc\x94\xa9\x05\xb9\xfb"
+ "\x62\x1e\x42\x03\x6c\x74\x8a\x1f\x12\x3e\xb7\x7e\x51\xff\x7f\x27"
+ "\x93\xe8\x6c\x49\x7d\x28\xfc\x80\xa6\x13\xfc\xb6\x90\xf7\xbb\x28"
+ "\xb5\x04\xb0\xb6\x33\x1c\x7e\x02\x42\x01\x70\x43\x52\x1d\xe3\xc6"
+ "\xbd\x5a\x40\x95\x35\x89\x4f\x41\x5f\x9e\x19\x88\x05\x3e\x43\x39"
+ "\x01\xbd\xb7\x7a\x76\x37\x51\x47\x49\x98\x12\x71\xd0\xe9\xca\xa7"
+ "\xc0\xcb\xaa\x00\x55\xbb\x6a\xb4\x73\x00\xd2\x72\x74\x13\x63\x39"
+ "\xa6\xe5\x25\x46\x1e\x77\x44\x78\xe0\xd1\x04",
+ .c_size = 139,
+ .public_key_vec = true,
+ },
+};
+
+/*
+ * ECDSA P1363 test vectors.
+ *
+ * Identical to the ECDSA test vectors, except the signature in "c" is P1363-encoded.
+ */
+static const struct sig_testvec p1363_ecdsa_nist_p256_tv_template[] = {
+ {
+ .key = /* secp256r1(sha256) */
+ "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f"
+ "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00"
+ "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9"
+ "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e"
+ "\xb8",
+ .key_len = 65,
+ .m =
+ "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b"
+ "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4",
+ .m_size = 32,
+ .c =
+ "\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63\xa8\x1a\xad\xfc"
+ "\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67\xdc\x0c\xd0\x82"
+ "\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9\xa9\x81\xac\x4a\x50\xd0\x91"
+ "\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f\x2a\x65\x35\x23\xe3\x1d\xfa",
+ .c_size = 64,
+ .public_key_vec = true,
},
};
/*
* EC-RDSA test vectors are generated by gost-engine.
*/
-static const struct akcipher_testvec ecrdsa_tv_template[] = {
+static const struct sig_testvec ecrdsa_tv_template[] = {
{
.key =
"\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05"
@@ -1099,7 +1846,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = {
"\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9",
.m_size = 32,
.public_key_vec = true,
- .siggen_sigver_test = true,
},
{
.key =
@@ -1125,7 +1871,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = {
"\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40",
.m_size = 32,
.public_key_vec = true,
- .siggen_sigver_test = true,
},
{
.key =
@@ -1151,7 +1896,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = {
"\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39",
.m_size = 32,
.public_key_vec = true,
- .siggen_sigver_test = true,
},
{
.key =
@@ -1186,7 +1930,6 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = {
"\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f",
.m_size = 64,
.public_key_vec = true,
- .siggen_sigver_test = true,
},
{
.key =
@@ -1221,14 +1964,68 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = {
"\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc",
.m_size = 64,
.public_key_vec = true,
- .siggen_sigver_test = true,
+ },
+};
+
+/*
+ * PKCS#1 RSA test vectors for hash algorithm "none"
+ * (i.e. the hash in "m" is not prepended by a Full Hash Prefix)
+ *
+ * Obtained from:
+ * https://vcsjones.dev/sometimes-valid-rsa-dotnet/
+ * https://gist.github.com/vcsjones/ab4c2327b53ed018eada76b75ef4fd99
+ */
+static const struct sig_testvec pkcs1_rsa_none_tv_template[] = {
+ {
+ .key =
+ "\x30\x82\x01\x0a\x02\x82\x01\x01\x00\xa2\x63\x0b\x39\x44\xb8\xbb"
+ "\x23\xa7\x44\x49\xbb\x0e\xff\xa1\xf0\x61\x0a\x53\x93\xb0\x98\xdb"
+ "\xad\x2c\x0f\x4a\xc5\x6e\xff\x86\x3c\x53\x55\x0f\x15\xce\x04\x3f"
+ "\x2b\xfd\xa9\x96\x96\xd9\xbe\x61\x79\x0b\x5b\xc9\x4c\x86\x76\xe5"
+ "\xe0\x43\x4b\x22\x95\xee\xc2\x2b\x43\xc1\x9f\xd8\x68\xb4\x8e\x40"
+ "\x4f\xee\x85\x38\xb9\x11\xc5\x23\xf2\x64\x58\xf0\x15\x32\x6f\x4e"
+ "\x57\xa1\xae\x88\xa4\x02\xd7\x2a\x1e\xcd\x4b\xe1\xdd\x63\xd5\x17"
+ "\x89\x32\x5b\xb0\x5e\x99\x5a\xa8\x9d\x28\x50\x0e\x17\xee\x96\xdb"
+ "\x61\x3b\x45\x51\x1d\xcf\x12\x56\x0b\x92\x47\xfc\xab\xae\xf6\x66"
+ "\x3d\x47\xac\x70\x72\xe7\x92\xe7\x5f\xcd\x10\xb9\xc4\x83\x64\x94"
+ "\x19\xbd\x25\x80\xe1\xe8\xd2\x22\xa5\xd0\xba\x02\x7a\xa1\x77\x93"
+ "\x5b\x65\xc3\xee\x17\x74\xbc\x41\x86\x2a\xdc\x08\x4c\x8c\x92\x8c"
+ "\x91\x2d\x9e\x77\x44\x1f\x68\xd6\xa8\x74\x77\xdb\x0e\x5b\x32\x8b"
+ "\x56\x8b\x33\xbd\xd9\x63\xc8\x49\x9d\x3a\xc5\xc5\xea\x33\x0b\xd2"
+ "\xf1\xa3\x1b\xf4\x8b\xbe\xd9\xb3\x57\x8b\x3b\xde\x04\xa7\x7a\x22"
+ "\xb2\x24\xae\x2e\xc7\x70\xc5\xbe\x4e\x83\x26\x08\xfb\x0b\xbd\xa9"
+ "\x4f\x99\x08\xe1\x10\x28\x72\xaa\xcd\x02\x03\x01\x00\x01",
+ .key_len = 270,
+ .m =
+ "\x68\xb4\xf9\x26\x34\x31\x25\xdd\x26\x50\x13\x68\xc1\x99\x26\x71"
+ "\x19\xa2\xde\x81",
+ .m_size = 20,
+ .c =
+ "\x6a\xdb\x39\xe5\x63\xb3\x25\xde\x58\xca\xc3\xf1\x36\x9c\x0b\x36"
+ "\xb7\xd6\x69\xf9\xba\xa6\x68\x14\x8c\x24\x52\xd3\x25\xa5\xf3\xad"
+ "\xc9\x47\x44\xde\x06\xd8\x0f\x56\xca\x2d\xfb\x0f\xe9\x99\xe2\x9d"
+ "\x8a\xe8\x7f\xfb\x9a\x99\x96\xf1\x2c\x4a\xe4\xc0\xae\x4d\x29\x47"
+ "\x38\x96\x51\x2f\x6d\x8e\xb8\x88\xbd\x1a\x0a\x70\xbc\x23\x38\x67"
+ "\x62\x22\x01\x23\x71\xe5\xbb\x95\xea\x6b\x8d\x31\x62\xbf\xf0\xc4"
+ "\xb9\x46\xd6\x67\xfc\x4c\xe6\x1f\xd6\x5d\xf7\xa9\xad\x3a\xf1\xbf"
+ "\xa2\xf9\x66\xde\xb6\x8e\xec\x8f\x81\x8d\x1e\x3a\x12\x27\x6a\xfc"
+ "\xae\x92\x9f\xc3\x87\xc3\xba\x8d\x04\xb8\x8f\x0f\x61\x68\x9a\x96"
+ "\x2c\x80\x2c\x32\x40\xde\x9d\xb9\x9b\xe2\xe4\x45\x2e\x91\x47\x5c"
+ "\x47\xa4\x9d\x02\x57\x59\xf7\x75\x5d\x5f\x32\x82\x75\x5d\xe5\x78"
+ "\xc9\x19\x61\x46\x06\x9d\xa5\x1d\xd6\x32\x48\x9a\xdb\x09\x29\x81"
+ "\x14\x2e\xf0\x27\xe9\x37\x13\x74\xec\xa5\xcd\x67\x6b\x19\xf6\x88"
+ "\xf0\xc2\x8b\xa8\x7f\x2f\x76\x5a\x3e\x0c\x47\x5d\xe8\x82\x50\x27"
+ "\x40\xce\x27\x41\x45\xa0\xcf\xaa\x2f\xd3\xad\x3c\xbf\x73\xff\x93"
+ "\xe3\x78\x49\xd9\xa9\x78\x22\x81\x9a\xe5\xe2\x94\xe9\x40\xab\xf1",
+ .c_size = 256,
+ .public_key_vec = true,
},
};
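[The "none" framing described in the comment above is easy to cross-check outside the kernel with nothing but Python's native big integers. In this sketch, pkcs1_none_verify is a hypothetical helper name; n and e stand for the modulus and public exponent DER-decoded from .key (the trailing \x02\x03\x01\x00\x01 makes e 65537), sig is the .c bytes and m is the bare 20-byte hash in .m:

def pkcs1_none_verify(n: int, e: int, sig: bytes, m: bytes) -> bool:
    # RSAVP1: s^e mod n, left-padded back to the modulus length.
    k = (n.bit_length() + 7) // 8
    if len(sig) != k:
        return False
    em = pow(int.from_bytes(sig, "big"), e, n).to_bytes(k, "big")
    # EMSA-PKCS1-v1_5 framing 00 01 FF..FF 00 || T, except that for hash
    # algorithm "none" T is the bare hash rather than DigestInfo || hash.
    if em[:2] != b"\x00\x01":
        return False
    sep = em.find(b"\x00", 2)
    if sep < 0 or sep - 2 < 8 or set(em[2:sep]) != {0xff}:
        return False
    return em[sep + 1:] == m

Fed the key, signature and message above, this should return True, and flipping any byte of m should make it return False.]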
/*
* PKCS#1 RSA test vectors. Obtained from CAVS testing.
*/
-static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
+static const struct sig_testvec pkcs1_rsa_tv_template[] = {
{
.key =
"\x30\x82\x04\xa5\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
@@ -1340,7 +2137,6 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
"\xda\x62\x8d\xe1\x2a\x71\x91\x43\x40\x61\x3c\x5a\xbe\x86\xfc\x5b"
"\xe6\xf9\xa9\x16\x31\x1f\xaf\x25\x6d\xc2\x4a\x23\x6e\x63\x02\xa2",
.c_size = 256,
- .siggen_sigver_test = true,
}
};
@@ -2990,1231 +3786,6 @@ static const struct kpp_testvec ffdhe8192_dh_tv_template[] __maybe_unused = {
},
};
-static const struct kpp_testvec curve25519_tv_template[] = {
-{
- .secret = (u8[32]){ 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
- 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45,
- 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a,
- 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
- .b_public = (u8[32]){ 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4,
- 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37,
- 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d,
- 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
- .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
- 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
- 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
- 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-{
- .secret = (u8[32]){ 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b,
- 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6,
- 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd,
- 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
- .b_public = (u8[32]){ 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54,
- 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a,
- 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4,
- 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
- .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
- 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
- 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
- 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-{
- .secret = (u8[32]){ 1 },
- .b_public = (u8[32]){ 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .expected_ss = (u8[32]){ 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64,
- 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d,
- 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98,
- 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-{
- .secret = (u8[32]){ 1 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f,
- 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d,
- 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3,
- 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-{
- .secret = (u8[32]){ 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
- 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
- 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
- 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
- .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
- 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
- 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
- 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
- .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
- 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
- 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
- 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-{
- .secret = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f },
- .expected_ss = (u8[32]){ 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2,
- 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57,
- 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05,
- 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-{
- .secret = (u8[32]){ 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 },
- .expected_ss = (u8[32]){ 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d,
- 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12,
- 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99,
- 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - normal case */
-{
- .secret = (u8[32]){ 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda,
- 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66,
- 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3,
- 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba },
- .b_public = (u8[32]){ 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5,
- 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9,
- 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e,
- 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a },
- .expected_ss = (u8[32]){ 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5,
- 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38,
- 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e,
- 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key on twist */
-{
- .secret = (u8[32]){ 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4,
- 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5,
- 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49,
- 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 },
- .b_public = (u8[32]){ 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5,
- 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8,
- 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3,
- 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 },
- .expected_ss = (u8[32]){ 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff,
- 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d,
- 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe,
- 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key on twist */
-{
- .secret = (u8[32]){ 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9,
- 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39,
- 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5,
- 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 },
- .b_public = (u8[32]){ 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f,
- 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b,
- 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c,
- 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 },
- .expected_ss = (u8[32]){ 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53,
- 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57,
- 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0,
- 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key on twist */
-{
- .secret = (u8[32]){ 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc,
- 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d,
- 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67,
- 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c },
- .b_public = (u8[32]){ 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97,
- 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f,
- 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45,
- 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a },
- .expected_ss = (u8[32]){ 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93,
- 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2,
- 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44,
- 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key on twist */
-{
- .secret = (u8[32]){ 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1,
- 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95,
- 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99,
- 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d },
- .b_public = (u8[32]){ 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27,
- 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07,
- 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae,
- 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c },
- .expected_ss = (u8[32]){ 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73,
- 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2,
- 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f,
- 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key on twist */
-{
- .secret = (u8[32]){ 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9,
- 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd,
- 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b,
- 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 },
- .b_public = (u8[32]){ 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5,
- 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52,
- 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8,
- 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 },
- .expected_ss = (u8[32]){ 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86,
- 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4,
- 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6,
- 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case on twist */
-{
- .secret = (u8[32]){ 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04,
- 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77,
- 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90,
- 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 },
- .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .expected_ss = (u8[32]){ 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97,
- 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9,
- 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7,
- 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case on twist */
-{
- .secret = (u8[32]){ 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36,
- 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd,
- 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c,
- 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 },
- .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .expected_ss = (u8[32]){ 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e,
- 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b,
- 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e,
- 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case on twist */
-{
- .secret = (u8[32]){ 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed,
- 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e,
- 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd,
- 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff,
- 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff,
- 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00,
- 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 },
- .expected_ss = (u8[32]){ 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f,
- 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1,
- 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10,
- 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case on twist */
-{
- .secret = (u8[32]){ 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3,
- 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d,
- 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00,
- 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 },
- .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00,
- 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff,
- 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8,
- 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4,
- 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70,
- 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case on twist */
-{
- .secret = (u8[32]){ 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3,
- 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a,
- 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e,
- 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 },
- .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57,
- 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c,
- 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59,
- 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case on twist */
-{
- .secret = (u8[32]){ 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f,
- 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42,
- 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9,
- 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 },
- .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c,
- 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5,
- 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65,
- 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6,
- 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4,
- 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8,
- 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe },
- .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .expected_ss = (u8[32]){ 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7,
- 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca,
- 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f,
- 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa,
- 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3,
- 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52,
- 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
- .expected_ss = (u8[32]){ 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3,
- 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e,
- 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75,
- 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26,
- 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea,
- 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00,
- 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
- .expected_ss = (u8[32]){ 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8,
- 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32,
- 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87,
- 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c,
- 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6,
- 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb,
- 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff,
- 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff,
- 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff,
- 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f },
- .expected_ss = (u8[32]){ 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85,
- 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f,
- 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0,
- 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38,
- 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b,
- 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c,
- 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
- .expected_ss = (u8[32]){ 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b,
- 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81,
- 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3,
- 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d,
- 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42,
- 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98,
- 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c,
- 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9,
- 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89,
- 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for public key */
-{
- .secret = (u8[32]){ 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29,
- 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6,
- 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c,
- 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f },
- .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75,
- 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89,
- 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c,
- 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc,
- 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1,
- 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d,
- 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae },
- .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09,
- 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde,
- 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1,
- 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81,
- 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a,
- 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99,
- 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d },
- .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17,
- 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35,
- 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55,
- 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11,
- 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b,
- 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9,
- 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 },
- .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53,
- 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e,
- 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6,
- 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78,
- 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2,
- 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd,
- 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .expected_ss = (u8[32]){ 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb,
- 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40,
- 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2,
- 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9,
- 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60,
- 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13,
- 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 },
- .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
- .expected_ss = (u8[32]){ 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c,
- 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3,
- 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65,
- 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a,
- 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7,
- 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11,
- 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e },
- .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
- .expected_ss = (u8[32]){ 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82,
- 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4,
- 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c,
- 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e,
- 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a,
- 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d,
- 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f },
- .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
- .expected_ss = (u8[32]){ 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2,
- 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60,
- 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25,
- 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb,
- 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97,
- 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c,
- 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 },
- .b_public = (u8[32]){ 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23,
- 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8,
- 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69,
- 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a,
- 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23,
- 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b,
- 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 },
- .b_public = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b,
- 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44,
- 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37,
- 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80,
- 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d,
- 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b,
- 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 },
- .b_public = (u8[32]){ 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63,
- 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae,
- 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f,
- 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0,
- 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd,
- 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49,
- 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 },
- .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41,
- 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0,
- 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf,
- 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9,
- 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa,
- 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5,
- 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e },
- .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47,
- 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3,
- 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b,
- 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8,
- 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98,
- 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0,
- 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 },
- .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0,
- 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1,
- 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a,
- 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02,
- 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4,
- 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68,
- 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d },
- .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f,
- 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2,
- 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95,
- 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7,
- 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06,
- 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9,
- 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 },
- .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5,
- 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0,
- 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80,
- 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - public key >= p */
-{
- .secret = (u8[32]){ 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd,
- 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4,
- 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04,
- 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 },
- .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .expected_ss = (u8[32]){ 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0,
- 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac,
- 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48,
- 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - RFC 7748 */
-{
- .secret = (u8[32]){ 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
- 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
- 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
- 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 },
- .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
- 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
- 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
- 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
- .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
- 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
- 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
- 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - RFC 7748 */
-{
- .secret = (u8[32]){ 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c,
- 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5,
- 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4,
- 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d },
- .b_public = (u8[32]){ 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3,
- 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c,
- 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e,
- 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 },
- .expected_ss = (u8[32]){ 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d,
- 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8,
- 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52,
- 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde,
- 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8,
- 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4,
- 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 },
- .expected_ss = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d,
- 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64,
- 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd,
- 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 },
- .expected_ss = (u8[32]){ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8,
- 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf,
- 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94,
- 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d },
- .expected_ss = (u8[32]){ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84,
- 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62,
- 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e,
- 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 },
- .expected_ss = (u8[32]){ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8,
- 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58,
- 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02,
- 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 },
- .expected_ss = (u8[32]){ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9,
- 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a,
- 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44,
- 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b },
- .expected_ss = (u8[32]){ 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd,
- 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22,
- 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56,
- 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b },
- .expected_ss = (u8[32]){ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53,
- 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f,
- 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18,
- 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f },
- .expected_ss = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55,
- 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b,
- 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79,
- 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f },
- .expected_ss = (u8[32]){ 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39,
- 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c,
- 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb,
- 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e },
- .expected_ss = (u8[32]){ 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04,
- 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10,
- 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58,
- 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c },
- .expected_ss = (u8[32]){ 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3,
- 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c,
- 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88,
- 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 },
- .expected_ss = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a,
- 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49,
- 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a,
- 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f },
- .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - edge case for shared secret */
-{
- .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
- 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
- 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
- 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
- .b_public = (u8[32]){ 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca,
- 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c,
- 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb,
- 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 },
- .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - checking for overflow */
-{
- .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
- 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
- 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
- 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
- .b_public = (u8[32]){ 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58,
- 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7,
- 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01,
- 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d },
- .expected_ss = (u8[32]){ 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d,
- 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27,
- 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b,
- 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - checking for overflow */
-{
- .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
- 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
- 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
- 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
- .b_public = (u8[32]){ 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26,
- 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2,
- 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44,
- 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e },
- .expected_ss = (u8[32]){ 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6,
- 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d,
- 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e,
- 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - checking for overflow */
-{
- .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
- 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
- 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
- 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
- .b_public = (u8[32]){ 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61,
- 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67,
- 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e,
- 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c },
- .expected_ss = (u8[32]){ 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65,
- 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce,
- 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0,
- 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - checking for overflow */
-{
- .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
- 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
- 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
- 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
- .b_public = (u8[32]){ 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee,
- 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d,
- 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14,
- 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 },
- .expected_ss = (u8[32]){ 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e,
- 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc,
- 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5,
- 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - checking for overflow */
-{
- .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
- 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
- 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
- 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
- .b_public = (u8[32]){ 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4,
- 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5,
- 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c,
- 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 },
- .expected_ss = (u8[32]){ 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b,
- 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93,
- 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f,
- 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - private key == -1 (mod order) */
-{
- .secret = (u8[32]){ 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8,
- 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 },
- .b_public = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
- 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
- 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
- 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
- .expected_ss = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
- 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
- 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
- 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-},
-/* wycheproof - private key == 1 (mod order) on twist */
-{
- .secret = (u8[32]){ 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef,
- 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f },
- .b_public = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
- 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
- 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
- 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
- .expected_ss = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
- 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
- 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
- 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
- .secret_size = 32,
- .b_public_size = 32,
- .expected_ss_size = 32,
-
-}
-};
-
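[For reference, the first removed entry is the RFC 7748 section 6.1 exchange (Alice's private key against Bob's public key), so the deleted vectors remain easy to cross-check against an independent X25519 implementation. A minimal sketch using the pyca/cryptography package, an external dependency assumed here rather than anything the kernel tree provides:

from cryptography.hazmat.primitives.asymmetric.x25519 import (
    X25519PrivateKey, X25519PublicKey,
)

# .secret, .b_public and .expected_ss of the first vector above.
secret = bytes.fromhex(
    "77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a")
b_public = bytes.fromhex(
    "de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f")
expected_ss = bytes.fromhex(
    "4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742")

shared = X25519PrivateKey.from_private_bytes(secret).exchange(
    X25519PublicKey.from_public_bytes(b_public))
assert shared == expected_ss

The wycheproof entries layer twist points, non-canonical public keys (>= p) and degenerate shared secrets on top of this; a plain exchange check like the one above does not distinguish those cases, which is exactly why the removed template carried them.]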
static const struct kpp_testvec ecdh_p192_tv_template[] = {
{
.secret =
@@ -5209,309 +4780,6 @@ static const struct hash_testvec rmd160_tv_template[] = {
}
};
-static const u8 zeroes[4096] = { [0 ... 4095] = 0 };
-static const u8 ones[4096] = { [0 ... 4095] = 0xff };
-
-static const struct hash_testvec crc64_rocksoft_tv_template[] = {
- {
- .plaintext = zeroes,
- .psize = 4096,
- .digest = "\x4e\xb6\x22\xeb\x67\xd3\x82\x64",
- }, {
- .plaintext = ones,
- .psize = 4096,
- .digest = "\xac\xa3\xec\x02\x73\xba\xdd\xc0",
- }
-};
-
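[The removed crc64_rocksoft vectors are likewise easy to regenerate. A bitwise sketch, assuming the usual CRC-64/Rocksoft (a.k.a. CRC-64/NVME) parameters -- reflected polynomial 0x9a6c9329ac4bc9b5, init and xorout all-ones -- and assuming the template stores the 64-bit CRC little-endian, which is what the byte order of the digests above suggests:

POLY = 0x9a6c9329ac4bc9b5  # reflected form of the NVMe polynomial 0xad93d23594c93659

def crc64_rocksoft(data: bytes) -> int:
    crc = 0xffffffffffffffff
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (POLY if crc & 1 else 0)
    return crc ^ 0xffffffffffffffff

# Should reproduce the first removed vector:
#   crc64_rocksoft(bytes(4096)).to_bytes(8, "little")
#   == b"\x4e\xb6\x22\xeb\x67\xd3\x82\x64"]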
-static const struct hash_testvec crct10dif_tv_template[] = {
- {
- .plaintext = "abc",
- .psize = 3,
- .digest = (u8 *)(u16 []){ 0x443b },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 79,
- .digest = (u8 *)(u16 []){ 0x4b70 },
- }, {
- .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
- "ddddddddddddd",
- .psize = 56,
- .digest = (u8 *)(u16 []){ 0x9ce3 },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 319,
- .digest = (u8 *)(u16 []){ 0x44c6 },
- }, {
- .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
- "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
- "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
- "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
- "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
- "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
- "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
- "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
- "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
- "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
- "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
- "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
- "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
- "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
- "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
- "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
- "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
- "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
- "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
- "\x58\xef\x63\xfa\x91\x05\x9c\x33"
- "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
- "\x19\xb0\x47\xde\x52\xe9\x80\x17"
- "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
- "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
- "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
- "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
- "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
- "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
- "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
- "\x86\x1d\x91\x28\xbf\x33\xca\x61"
- "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
- "\x47\xde\x75\x0c\x80\x17\xae\x22"
- "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
- "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
- "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
- "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
- "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
- "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
- "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
- "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
- "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
- "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
- "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
- "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
- "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
- "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
- "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
- "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
- "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
- "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
- "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
- "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
- "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
- "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
- "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
- "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
- "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
- "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
- "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
- "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
- "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
- "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
- "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
- "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
- "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
- "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
- "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
- "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
- "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
- "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
- "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
- "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
- "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
- "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
- "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
- "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
- "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
- "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
- "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
- "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
- "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
- "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
- "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
- "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
- "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
- "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
- "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
- "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
- "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
- "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
- "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
- "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
- "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
- "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
- "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
- "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
- "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
- "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
- "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
- "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
- "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
- "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
- "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
- "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
- "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
- "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
- "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
- "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
- "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
- "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
- "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
- "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
- "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
- "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
- "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
- "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
- "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
- "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
- "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
- "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
- "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
- "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
- "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
- "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
- "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
- "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
- "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
- "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
- "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
- "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
- "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
- "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
- "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
- "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
- "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
- "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
- "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
- "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
- "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
- "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
- "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
- "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
- "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
- "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
- "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
- "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
- "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
- "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
- "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
- "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
- "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
- "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
- "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
- "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
- "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
- "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
- "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
- "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
- "\x47\xde\x52\xe9\x80\x17\x8b\x22"
- "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
- "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
- "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
- "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
- "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
- "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
- "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
- "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
- "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
- "\x75\x0c\x80\x17\xae\x22\xb9\x50"
- "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
- "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
- "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
- "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
- "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
- "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
- "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
- "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
- "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
- "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
- "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
- "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
- "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
- "\x48\xdf\x53\xea\x81\x18\x8c\x23"
- "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
- "\x09\xa0\x37\xce\x42\xd9\x70\x07"
- "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
- "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
- "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
- "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
- "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
- "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
- "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
- "\x76\x0d\x81\x18\xaf\x23\xba\x51"
- "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
- "\x37\xce\x65\xfc\x70\x07\x9e\x12"
- "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
- "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
- "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
- "\xff\x73\x0a\xa1\x15\xac\x43\xda"
- "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
- "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
- "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
- "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
- "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
- "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
- "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
- "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
- "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
- "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
- "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
- "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
- "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
- "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
- "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
- "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
- "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
- "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
- "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
- "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
- "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
- "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
- "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
- "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
- "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
- "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
- "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
- "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
- "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
- "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
- "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
- "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
- "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
- "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
- "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
- "\xef\x86\x1d\x91\x28\xbf\x33\xca"
- "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
- "\xd3\x47\xde\x75\x0c\x80\x17\xae"
- "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
- "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
- "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
- "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
- "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
- "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
- "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
- "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
- "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
- "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
- "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
- "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
- "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
- "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
- "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
- "\x67\xfe\x95\x09\xa0\x37\xce\x42"
- "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
- "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
- "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
- .psize = 2048,
- .digest = (u8 *)(u16 []){ 0x23ca },
- }
-};
-
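
The 16-bit digests above (u16 values cast to u8 *) and the familiar 56-, 319- and 2048-byte plaintexts mark the removed template as a 16-bit CRC, reading as the CRC-T10DIF vectors. For anyone who needs to regenerate such digests independently, a minimal bitwise CRC-T10DIF (polynomial 0x8bb7, zero init, MSB-first, no reflection, no final XOR) can be sketched as below; the function name is ours, not the kernel's.

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative bitwise CRC-T10DIF; not the kernel implementation. */
	static uint16_t crc_t10dif_bitwise(const uint8_t *buf, size_t len)
	{
		uint16_t crc = 0;
		size_t i;
		int j;

		for (i = 0; i < len; i++) {
			crc ^= (uint16_t)buf[i] << 8;
			for (j = 0; j < 8; j++)
				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
						     : crc << 1;
		}
		return crc;
	}
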
/*
* Streebog test vectors from RFC 6986 and GOST R 34.11-2012
*/
@@ -5628,65 +4896,6 @@ static const struct hash_testvec hmac_streebog512_tv_template[] = {
},
};
-/*
- * SM2 test vectors.
- */
-static const struct akcipher_testvec sm2_tv_template[] = {
- { /* Generated from openssl */
- .key =
- "\x04"
- "\x8e\xa0\x33\x69\x91\x7e\x3d\xec\xad\x8e\xf0\x45\x5e\x13\x3e\x68"
- "\x5b\x8c\xab\x5c\xc6\xc8\x50\xdf\x91\x00\xe0\x24\x73\x4d\x31\xf2"
- "\x2e\xc0\xd5\x6b\xee\xda\x98\x93\xec\xd8\x36\xaa\xb9\xcf\x63\x82"
- "\xef\xa7\x1a\x03\xed\x16\xba\x74\xb8\x8b\xf9\xe5\x70\x39\xa4\x70",
- .key_len = 65,
- .param_len = 0,
- .c =
- "\x30\x45"
- "\x02\x20"
- "\x70\xab\xb6\x7d\xd6\x54\x80\x64\x42\x7e\x2d\x05\x08\x36\xc9\x96"
- "\x25\xc2\xbb\xff\x08\xe5\x43\x15\x5e\xf3\x06\xd9\x2b\x2f\x0a\x9f"
- "\x02\x21"
- "\x00"
- "\xbf\x21\x5f\x7e\x5d\x3f\x1a\x4d\x8f\x84\xc2\xe9\xa6\x4c\xa4\x18"
- "\xb2\xb8\x46\xf4\x32\x96\xfa\x57\xc6\x29\xd4\x89\xae\xcc\xda\xdb",
- .c_size = 71,
- .algo = OID_SM2_with_SM3,
- .m =
- "\x47\xa7\xbf\xd3\xda\xc4\x79\xee\xda\x8b\x4f\xe8\x40\x94\xd4\x32"
- "\x8f\xf1\xcd\x68\x4d\xbd\x9b\x1d\xe0\xd8\x9a\x5d\xad\x85\x47\x5c",
- .m_size = 32,
- .public_key_vec = true,
- .siggen_sigver_test = true,
- },
- { /* From libgcrypt */
- .key =
- "\x04"
- "\x87\x59\x38\x9a\x34\xaa\xad\x07\xec\xf4\xe0\xc8\xc2\x65\x0a\x44"
- "\x59\xc8\xd9\x26\xee\x23\x78\x32\x4e\x02\x61\xc5\x25\x38\xcb\x47"
- "\x75\x28\x10\x6b\x1e\x0b\x7c\x8d\xd5\xff\x29\xa9\xc8\x6a\x89\x06"
- "\x56\x56\xeb\x33\x15\x4b\xc0\x55\x60\x91\xef\x8a\xc9\xd1\x7d\x78",
- .key_len = 65,
- .param_len = 0,
- .c =
- "\x30\x44"
- "\x02\x20"
- "\xd9\xec\xef\xe8\x5f\xee\x3c\x59\x57\x8e\x5b\xab\xb3\x02\xe1\x42"
- "\x4b\x67\x2c\x0b\x26\xb6\x51\x2c\x3e\xfc\xc6\x49\xec\xfe\x89\xe5"
- "\x02\x20"
- "\x43\x45\xd0\xa5\xff\xe5\x13\x27\x26\xd0\xec\x37\xad\x24\x1e\x9a"
- "\x71\x9a\xa4\x89\xb0\x7e\x0f\xc4\xbb\x2d\x50\xd0\xe5\x7f\x7a\x68",
- .c_size = 70,
- .algo = OID_SM2_with_SM3,
- .m =
- "\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff\x00"
- "\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0",
- .m_size = 32,
- .public_key_vec = true,
- .siggen_sigver_test = true,
- },
-};
-
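
A note on the shape of the removed SM2 vectors: each .c field is a DER-encoded SEQUENCE of the two signature integers r and s. In the first vector, s starts with 0xbf (top bit set), so DER prepends a 0x00 pad byte and the INTEGER is 33 bytes long (0x02 0x21 0x00 ...), which is why .c_size is 71 rather than 70. A small parser for this layout, as a sketch (short-form lengths only, which is all these vectors use; the helper name is ours):

	#include <stddef.h>
	#include <stdint.h>

	/* Pull r and s out of a DER SEQUENCE like the .c fields above. */
	static int der_sig_to_rs(const uint8_t *c, size_t clen,
				 const uint8_t **r, size_t *rlen,
				 const uint8_t **s, size_t *slen)
	{
		size_t off = 0;

		if (clen < 4 || c[off++] != 0x30 || c[off++] != clen - 2)
			return -1;		/* not a short-form SEQUENCE */
		if (c[off++] != 0x02)
			return -1;
		*rlen = c[off++];		/* may carry a 0x00 pad byte */
		*r = c + off;
		off += *rlen;
		if (off + 2 > clen || c[off++] != 0x02)
			return -1;
		*slen = c[off++];
		*s = c + off;
		return off + *slen == clen ? 0 : -1;
	}
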
/* Example vectors below taken from
* http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
*
@@ -7809,159 +7018,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-static const char vmac64_string1[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
- '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
-};
-
-static const char vmac64_string2[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'a', 'b', 'c',
-};
-
-static const char vmac64_string3[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
- 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
- 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
- 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
- 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
-};
-
-static const char vmac64_string4[33] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
- 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
- 'z',
-};
-
-static const char vmac64_string5[143] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
- ']', '%', '9', '2', '7', '!', 'A',
-};
-
-static const char vmac64_string6[145] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'p', 't', '*', '7', 'l', 'i', '!', '#',
- 'w', '0', 'z', '/', '4', 'A', 'n',
-};
-
-static const struct hash_testvec vmac64_aes_tv_template[] = {
- { /* draft-krovetz-vmac-01 test vector 1 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
- .psize = 16,
- .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
- }, { /* draft-krovetz-vmac-01 test vector 2 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
- .psize = 19,
- .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
- }, { /* draft-krovetz-vmac-01 test vector 3 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
- .psize = 64,
- .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
- }, { /* draft-krovetz-vmac-01 test vector 4 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
- .psize = 316,
- .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string1,
- .psize = sizeof(vmac64_string1),
- .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string2,
- .psize = sizeof(vmac64_string2),
- .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string3,
- .psize = sizeof(vmac64_string3),
- .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string1,
- .psize = sizeof(vmac64_string1),
- .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string2,
- .psize = sizeof(vmac64_string2),
- .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string3,
- .psize = sizeof(vmac64_string3),
- .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string4,
- .psize = sizeof(vmac64_string4),
- .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string5,
- .psize = sizeof(vmac64_string5),
- .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string6,
- .psize = sizeof(vmac64_string6),
- .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
- },
-};
-
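
A quirk worth noting in the vmac64 vectors being dropped: every .plaintext begins with a 16-byte block that, as we read these vectors, is the VMAC nonce left-padded with zeros -- the draft-krovetz-vmac-01 cases use the 8-byte nonce "bcdefghi", which is why it appears after eight NUL bytes, and the vmac64_string* buffers all lead with 16 zero bytes. The message proper starts at offset 16, so .psize counts nonce plus message. A caller would lay out its input along these lines (a sketch under that assumption; helper name is ours):

	#include <stdint.h>
	#include <string.h>

	/* Place a zero-padded 16-byte nonce ahead of the message. */
	static size_t vmac64_layout(uint8_t *buf, const uint8_t *nonce,
				    size_t nlen, const uint8_t *msg,
				    size_t mlen)
	{
		memset(buf, 0, 16);
		memcpy(buf + 16 - nlen, nonce, nlen);	/* e.g. "bcdefghi" */
		memcpy(buf + 16, msg, mlen);
		return 16 + mlen;			/* == .psize above */
	}
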
/*
* SHA384 HMAC test vectors from RFC4231
*/
@@ -8543,294 +7599,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = {
},
};
-/*
- * Poly1305 test vectors from RFC7539 A.3.
- */
-
-static const struct hash_testvec poly1305_tv_template[] = {
- { /* Test Vector #1 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #2 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e",
- }, { /* Test Vector #3 */
- .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf"
- "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0",
- }, { /* Test Vector #4 */
- .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
- "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
- "\x47\x39\x17\xc1\x40\x2b\x80\x09"
- "\x9d\xca\x5c\xbc\x20\x70\x75\xc0"
- "\x27\x54\x77\x61\x73\x20\x62\x72"
- "\x69\x6c\x6c\x69\x67\x2c\x20\x61"
- "\x6e\x64\x20\x74\x68\x65\x20\x73"
- "\x6c\x69\x74\x68\x79\x20\x74\x6f"
- "\x76\x65\x73\x0a\x44\x69\x64\x20"
- "\x67\x79\x72\x65\x20\x61\x6e\x64"
- "\x20\x67\x69\x6d\x62\x6c\x65\x20"
- "\x69\x6e\x20\x74\x68\x65\x20\x77"
- "\x61\x62\x65\x3a\x0a\x41\x6c\x6c"
- "\x20\x6d\x69\x6d\x73\x79\x20\x77"
- "\x65\x72\x65\x20\x74\x68\x65\x20"
- "\x62\x6f\x72\x6f\x67\x6f\x76\x65"
- "\x73\x2c\x0a\x41\x6e\x64\x20\x74"
- "\x68\x65\x20\x6d\x6f\x6d\x65\x20"
- "\x72\x61\x74\x68\x73\x20\x6f\x75"
- "\x74\x67\x72\x61\x62\x65\x2e",
- .psize = 159,
- .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61"
- "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62",
- }, { /* Test Vector #5 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #6 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #7 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xf0\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x11\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x05\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #8 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .psize = 80,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #9 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xfd\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- }, { /* Test Vector #10 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x14\x00\x00\x00\x00\x00\x00\x00"
- "\x55\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #11 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Regression test for overflow in AVX2 implementation */
- .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff",
- .psize = 300,
- .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
- "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
- }
-};
-
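
These shash vectors fold the 32-byte one-time key (r, then s) into the head of .plaintext, so .psize is always 32 plus the message length -- Test Vector #1's 96 bytes are an all-zero key followed by the RFC's 64 zero message bytes. Test Vector #5 is small enough to check by hand and shows the core arithmetic:

	/*
	 * TV #5: r = 2 (clamping only clears bits, so 0x02 survives), s = 0,
	 * message = one block of 0xff.
	 *
	 *   block = le128(ff..ff) + 2^128 = 2^129 - 1   (the appended 1 bit)
	 *   acc   = (2^129 - 1) * 2 mod (2^130 - 5)
	 *         = 2^130 - 2 = (2^130 - 5) + 3        ==>  acc = 3
	 *   tag   = (acc + s) mod 2^128 = 3
	 *
	 * matching the .digest of 03 00 .. 00.
	 */
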
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
static const struct hash_testvec nhpoly1305_tv_template[] = {
{
@@ -10243,6 +9011,126 @@ static const struct cipher_testvec des_tv_template[] = {
.ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
.ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
.len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\xe0\xe0\xe0\xf1\xf1\xf1\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 1a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\xfe\x01\xfe\x01\xfe\x01\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 1b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\x01\xfe\x01\xfe\x01\xfe\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 2a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\xe0\x1f\xe0\x0e\xf1\x0e\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 2b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\x1f\xe0\x1f\xf1\x0e\xf1\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 3a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\xe0\x01\xe0\x01\xf1\x01\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 3b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\x01\xe0\x01\xf1\x01\xf1\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 4a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\xfe\x1f\xfe\x0e\xfe\x0e\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 4b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\x1f\xfe\x1f\xfe\x0e\xfe\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 5a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\x1f\x01\x1f\x01\x0e\x01\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 5b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\x01\x1f\x01\x0e\x01\x0e\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 6a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\xfe\xe0\xfe\xf1\xfe\xf1\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 6b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\xe0\xfe\xe0\xfe\xf1\xfe\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
}, { /* Two blocks -- for testing encryption across pages */
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
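
The DES vectors added in this hunk are negative setkey tests: .wk = 1 and .setkey_error = -EINVAL cover the weak keys (rounding out the set begun just above the hunk; under a weak key all sixteen round subkeys collapse and encryption becomes its own inverse) and all twelve semi-weak keys (six pairs K1, K2 with E_K1(E_K2(x)) = x). The ptext/ctext fields are carried along, but the assertion in each entry is that key setup must fail. A self-contained check for the four weak keys, with the per-byte odd-parity bit masked off, might look like this sketch (helper name ours; the twelve semi-weak keys are just twelve more table rows):

	#include <stdint.h>

	static int des_key_is_weak(const uint8_t key[8])
	{
		static const uint8_t weak[4][8] = {
			{ 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01 },
			{ 0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe },
			{ 0xe0,0xe0,0xe0,0xe0,0xf1,0xf1,0xf1,0xf1 },
			{ 0x1f,0x1f,0x1f,0x1f,0x0e,0x0e,0x0e,0x0e },
		};
		int i, j;

		for (i = 0; i < 4; i++) {
			for (j = 0; j < 8; j++)
				if ((weak[i][j] & 0xfe) != (key[j] & 0xfe))
					break;		/* parity bit ignored */
			if (j == 8)
				return 1;
		}
		return 0;
	}
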
@@ -14784,102 +13672,351 @@ static const struct cipher_testvec sm4_ctr_rfc3686_tv_template[] = {
}
};
-static const struct cipher_testvec sm4_ofb_tv_template[] = {
- { /* From: draft-ribose-cfrg-sm4-02, paragraph 12.2.3 */
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+static const struct cipher_testvec sm4_cts_tv_template[] = {
+ /* Generated from AES-CTS test vectors */
+ {
.klen = 16,
- .iv = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10"
- "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .ctext = "\x69\x3d\x9a\x53\x5b\xad\x5b\xb1"
- "\x78\x6f\x53\xd7\x25\x3a\x70\x56"
- "\xf2\x07\x5d\x28\xb5\x23\x5f\x58"
- "\xd5\x00\x27\xe4\x17\x7d\x2b\xce",
- .len = 32,
- }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.3, Example 1 */
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+ "\x74\x65\x72\x69\x79\x61\x6b\x69",
+ .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+ "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+ "\x20",
+ .len = 17,
+ .ctext = "\x05\xfe\x23\xee\x17\xa2\x89\x98"
+ "\xbc\x97\x0a\x0b\x54\x67\xca\xd7"
+ "\xd6",
+ }, {
.klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
- "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
- "\xee\xee\xee\xee\xff\xff\xff\xff"
- "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
- .ctext = "\xac\x32\x36\xcb\x86\x1d\xd3\x16"
- "\xe6\x41\x3b\x4e\x3c\x75\x24\xb7"
- "\x1d\x01\xac\xa2\x48\x7c\xa5\x82"
- "\xcb\xf5\x46\x3e\x66\x98\x53\x9b",
- .len = 32,
- }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.3, Example 2 */
- .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10"
- "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+ "\x74\x65\x72\x69\x79\x61\x6b\x69",
+ .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+ "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+ "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+ "\x20\x47\x61\x75\x27\x73\x20",
+ .len = 31,
+ .ctext = "\x15\x46\xe4\x95\xa4\xec\xf0\xb8"
+ "\x49\xd6\x6a\x9d\x89\xc7\xfd\x70"
+ "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+ "\x93\xf7\x70\xbb\xa8\x3f\xa3",
+ }, {
.klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
- "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
- "\xee\xee\xee\xee\xff\xff\xff\xff"
- "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
- .ctext = "\x5d\xcc\xcd\x25\xa8\x4b\xa1\x65"
- "\x60\xd7\xf2\x65\x88\x70\x68\x49"
- "\x33\xfa\x16\xbd\x5c\xd9\xc8\x56"
- "\xca\xca\xa1\xe1\x01\x89\x7a\x97",
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+ "\x74\x65\x72\x69\x79\x61\x6b\x69",
+ .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+ "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+ "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+ "\x20\x47\x61\x75\x27\x73\x20\x43",
.len = 32,
+ .ctext = "\x89\xc7\x99\x3f\x87\x69\x5c\xd3"
+ "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3"
+ "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+ "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf",
+ }, {
+ .klen = 16,
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+ "\x74\x65\x72\x69\x79\x61\x6b\x69",
+ .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+ "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+ "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+ "\x20\x47\x61\x75\x27\x73\x20\x43"
+ "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
+ "\x70\x6c\x65\x61\x73\x65\x2c",
+ .len = 47,
+ .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+ "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf"
+ "\xd3\xe1\xdc\xeb\xfa\x04\x11\x99"
+ "\xde\xcf\x6f\x4d\x7b\x09\x92\x7f"
+ "\x89\xc7\x99\x3f\x87\x69\x5c\xd3"
+ "\x01\x6a\xbf\xd4\x3f\x79\x02",
+ }, {
+ .klen = 16,
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+ "\x74\x65\x72\x69\x79\x61\x6b\x69",
+ .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+ "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+ "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+ "\x20\x47\x61\x75\x27\x73\x20\x43"
+ "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
+ "\x70\x6c\x65\x61\x73\x65\x2c\x20",
+ .len = 48,
+ .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+ "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf"
+ "\x9a\xbd\x7b\xfe\x82\xab\xcc\x7f"
+ "\xbd\x99\x21\x0c\x5e\x4d\xed\x20"
+ "\x89\xc7\x99\x3f\x87\x69\x5c\xd3"
+ "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3",
+ }, {
+ .klen = 16,
+ .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+ "\x74\x65\x72\x69\x79\x61\x6b\x69",
+ .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+ "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+ "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+ "\x20\x47\x61\x75\x27\x73\x20\x43"
+ "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
+ "\x70\x6c\x65\x61\x73\x65\x2c\x20"
+ "\x61\x6e\x64\x20\x77\x6f\x6e\x74"
+ "\x6f\x6e\x20\x73\x6f\x75\x70\x2e",
+ .len = 64,
+ .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+ "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf"
+ "\x89\xc7\x99\x3f\x87\x69\x5c\xd3"
+ "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3"
+ "\x58\x19\xa4\x8f\xa9\x68\x5e\x6b"
+ "\x2c\x0f\x81\x60\x15\x98\x27\x4f"
+ "\x9a\xbd\x7b\xfe\x82\xab\xcc\x7f"
+ "\xbd\x99\x21\x0c\x5e\x4d\xed\x20",
}
};
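
The new sm4_cts vectors follow the CS3 ("Kerberos") convention, and the data shows it directly: the 15 trailing bytes of the 31-byte ciphertext are the truncated first-block ciphertext, which appears whole as the trailing (swapped) block of the 32-byte vector, and even the block-aligned 48- and 64-byte cases emit their last two blocks swapped. A minimal sketch of that encrypt path, assuming a one-block sm4_encrypt_block() primitive (hypothetical name) and len > 16:

	#include <stdint.h>
	#include <string.h>

	void sm4_encrypt_block(uint8_t out[16], const uint8_t in[16]);

	/*
	 * CBC with CS3 ciphertext stealing, encrypt direction. The last two
	 * blocks always come out swapped, the next-to-last truncated to the
	 * tail length -- the ordering visible in the vectors above.
	 */
	static void cbc_cs3_encrypt(uint8_t *dst, const uint8_t *src,
				    size_t len, const uint8_t iv[16])
	{
		size_t n = (len + 15) / 16;	/* blocks, last may be short */
		size_t d = len - (n - 1) * 16;	/* bytes in final block */
		uint8_t prev[16], x[16];
		size_t i, j;

		memcpy(prev, iv, 16);
		for (i = 0; i + 2 < n; i++) {	/* all but the last two */
			for (j = 0; j < 16; j++)
				x[j] = src[16 * i + j] ^ prev[j];
			sm4_encrypt_block(prev, x);
			memcpy(dst + 16 * i, prev, 16);
		}
		for (j = 0; j < 16; j++)	/* next-to-last block */
			x[j] = src[16 * (n - 2) + j] ^ prev[j];
		sm4_encrypt_block(prev, x);	/* prev = C[n-1], pre-swap */
		memset(x, 0, 16);		/* zero-pad the tail ... */
		memcpy(x, src + 16 * (n - 1), d);
		for (j = 0; j < 16; j++)	/* ... and chain as usual */
			x[j] ^= prev[j];
		sm4_encrypt_block(dst + 16 * (n - 2), x); /* full C[n] first */
		memcpy(dst + 16 * (n - 1), prev, d);	  /* short C[n-1] */
	}
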
-static const struct cipher_testvec sm4_cfb_tv_template[] = {
- { /* From: draft-ribose-cfrg-sm4-02, paragraph 12.2.4 */
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .klen = 16,
- .iv = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10"
- "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .ctext = "\x69\x3d\x9a\x53\x5b\xad\x5b\xb1"
- "\x78\x6f\x53\xd7\x25\x3a\x70\x56"
- "\x9e\xd2\x58\xa8\x5a\x04\x67\xcc"
- "\x92\xaa\xb3\x93\xdd\x97\x89\x95",
+static const struct cipher_testvec sm4_xts_tv_template[] = {
+ /* Generated from AES-XTS test vectors */
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ctext = "\xd9\xb4\x21\xf7\x31\xc8\x94\xfd"
+ "\xc3\x5b\x77\x29\x1f\xe4\xe3\xb0"
+ "\x2a\x1f\xb7\x66\x98\xd5\x9f\x0e"
+ "\x51\x37\x6c\x4a\xda\x5b\xc7\x5d",
.len = 32,
- }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.4, Example 1 */
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
- "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
- "\xee\xee\xee\xee\xff\xff\xff\xff"
- "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
- .ctext = "\xac\x32\x36\xcb\x86\x1d\xd3\x16"
- "\xe6\x41\x3b\x4e\x3c\x75\x24\xb7"
- "\x69\xd4\xc5\x4e\xd4\x33\xb9\xa0"
- "\x34\x60\x09\xbe\xb3\x7b\x2b\x3f",
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ctext = "\xa7\x4d\x72\x6c\x11\x19\x6a\x32"
+ "\xbe\x04\xe0\x01\xff\x29\xd0\xc7"
+ "\x93\x2f\x9f\x3e\xc2\x9b\xfc\xb6"
+ "\x4d\xd1\x7f\x63\xcb\xd3\xea\x31",
.len = 32,
- }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.4, Example 2 */
- .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10"
- "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
- "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
- "\xee\xee\xee\xee\xff\xff\xff\xff"
- "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
- .ctext = "\x5d\xcc\xcd\x25\xa8\x4b\xa1\x65"
- "\x60\xd7\xf2\x65\x88\x70\x68\x49"
- "\x0d\x9b\x86\xff\x20\xc3\xbf\xe1"
- "\x15\xff\xa0\x2c\xa6\x19\x2c\xc5",
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ctext = "\x7f\x76\x08\x8e\xff\xad\xf7\x0c"
+ "\x02\xea\x9f\x95\xda\x06\x28\xd3"
+ "\x51\xbf\xcb\x9e\xac\x05\x63\xbc"
+ "\xf1\x7b\x71\x0d\xab\x0a\x98\x26",
.len = 32,
- }
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ctext = "\x54\xdd\x65\xb6\x32\x6f\xae\xa8"
+ "\xfa\xd1\xa8\x3c\x63\x61\x4a\xf3"
+ "\x9f\x72\x1d\x8d\xfe\x17\x7a\x30"
+ "\xb6\x6a\xbf\x6a\x44\x99\x80\xe1"
+ "\xcd\xbe\x06\xaf\xb7\x33\x36\xf3"
+ "\x7a\x4d\x39\xde\x96\x4a\x30\xd7"
+ "\xd0\x4a\x37\x99\x16\x9c\x60\x25"
+ "\x8f\x6b\x74\x8a\x61\x86\x1a\xa5"
+ "\xec\x92\xa2\xc1\x5b\x2b\x7c\x61"
+ "\x5a\x42\xab\xa4\x99\xbb\xd6\xb7"
+ "\x1d\xb9\xc7\x89\xb2\x18\x20\x89"
+ "\xa2\x5d\xd3\xdf\x80\x0e\xd1\x86"
+ "\x4d\x19\xf7\xed\x45\xfd\x17\xa9"
+ "\x48\x0b\x0f\xb8\x2d\x9b\x7f\xc3"
+ "\xed\x57\xe9\xa1\x14\x0e\xaa\x77"
+ "\x8d\xd2\xdd\x67\x9e\x3e\xdc\x3d"
+ "\xc4\xd5\x5c\x95\x0e\xbc\x53\x1d"
+ "\x95\x92\xf7\xc4\x63\x82\x56\xd5"
+ "\x65\x18\x29\x2a\x20\xaf\x98\xfd"
+ "\xd3\xa6\x36\x00\x35\x0a\x70\xab"
+ "\x5a\x40\xf4\xc2\x85\x03\x7c\xa0"
+ "\x1f\x25\x1f\x19\xec\xae\x03\x29"
+ "\xff\x77\xad\x88\xcd\x5a\x4c\xde"
+ "\xa2\xae\xab\xc2\x21\x48\xff\xbd"
+ "\x23\x9b\xd1\x05\x15\xbd\xe1\x13"
+ "\x1d\xec\x84\x04\xe4\x43\xdc\x76"
+ "\x31\x40\xd5\xf2\x2b\xf3\x3e\x0c"
+ "\x68\x72\xd6\xb8\x1d\x63\x0f\x6f"
+ "\x00\xcd\xd0\x58\xfe\x80\xf9\xcb"
+ "\xfb\x77\x70\x7f\x93\xce\xe2\xca"
+ "\x92\xb9\x15\xb8\x30\x40\x27\xc1"
+ "\x90\xa8\x4e\x2d\x65\xe0\x18\xcc"
+ "\x6a\x38\x7d\x37\x66\xac\xdb\x28"
+ "\x25\x32\x84\xe8\xdb\x9a\xcf\x8f"
+ "\x52\x28\x0d\xdc\x6d\x00\x33\xd2"
+ "\xcc\xaa\xa4\xf9\xae\xff\x12\x36"
+ "\x69\xbc\x02\x4f\xd6\x76\x8e\xdf"
+ "\x8b\xc1\xf8\xd6\x22\xc1\x9c\x60"
+ "\x9e\xf9\x7f\x60\x91\x90\xcd\x11"
+ "\x02\x41\xe7\xfb\x08\x4e\xd8\x94"
+ "\x2d\xa1\xf9\xb9\xcf\x1b\x51\x4b"
+ "\x61\xa3\x88\xb3\x0e\xa6\x1a\x4a"
+ "\x74\x5b\x38\x1e\xe7\xad\x6c\x4d"
+ "\xb1\x27\x54\x53\xb8\x41\x3f\x98"
+ "\xdf\x6e\x4a\x40\x98\x6e\xe4\xb5"
+ "\x9a\xf5\xdf\xae\xcd\x30\x12\x65"
+ "\x17\x90\x67\xa0\x0d\x7c\xa3\x5a"
+ "\xb9\x5a\xbd\x61\x7a\xde\xa2\x8e"
+ "\xc1\xc2\x6a\x97\xde\x28\xb8\xbf"
+ "\xe3\x01\x20\xd6\xae\xfb\xd2\x58"
+ "\xc5\x9e\x42\xd1\x61\xe8\x06\x5a"
+ "\x78\x10\x6b\xdc\xa5\xcd\x90\xfb"
+ "\x3a\xac\x4e\x93\x86\x6c\x8a\x7f"
+ "\x96\x76\x86\x0a\x79\x14\x5b\xd9"
+ "\x2e\x02\xe8\x19\xa9\x0b\xe0\xb9"
+ "\x7c\xc5\x22\xb3\x21\x06\x85\x6f"
+ "\xdf\x0e\x54\xd8\x8e\x46\x24\x15"
+ "\x5a\x2f\x1c\x14\xea\xea\xa1\x63"
+ "\xf8\x58\xe9\x9a\x80\x6e\x79\x1a"
+ "\xcd\x82\xf1\xb0\xe2\x9f\x00\x28"
+ "\xa4\xc3\x8e\x97\x6f\x57\x1a\x93"
+ "\xf4\xfd\x57\xd7\x87\xc2\x4d\xb0"
+ "\xe0\x1c\xa3\x04\xe5\xa5\xc4\xdd"
+ "\x50\xcf\x8b\xdb\xf4\x91\xe5\x7c",
+ .len = 512,
+ }, {
+ .key = "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27"
+ "\x02\x88\x41\x97\x16\x93\x99\x37"
+ "\x51\x05\x82\x09\x74\x94\x45\x92",
+ .klen = 32,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xf8\xf9\xfa\xfb\xfc",
+ .ctext = "\xa2\x9f\x9e\x4e\x71\xdb\x28\x3c"
+ "\x80\x0e\xf6\xb7\x8e\x57\x1c\xba"
+ "\x90\xda\x3b\x6c\x22\x00\x68\x30"
+ "\x1d\x63\x0d\x9e\x6a\xad\x37\x55"
+ "\xbc\x77\x1e\xc9\xad\x83\x30\xd5"
+ "\x27\xb2\x66\x77\x18\x3c\xa6\x39"
+ "\x9c\x0a\xaa\x1f\x02\xe1\xd5\x65"
+ "\x9b\x8d\xc5\x97\x3d\xc5\x04\x53"
+ "\x78\x00\xe3\xb0\x1a\x43\x4e\xb7"
+ "\xc4\x9f\x38\xc5\x7b\xa4\x70\x64"
+ "\x78\xe6\x32\xd9\x65\x44\xc5\x64"
+ "\xb8\x42\x35\x99\xff\x66\x75\xb0"
+ "\x22\xd3\x9b\x6e\x8d\xcf\x6a\x24"
+ "\xfd\x92\xb7\x1b\x04\x28\x2a\x61"
+ "\xdc\x96\x2a\x20\x7a\x2c\xf1\xf9"
+ "\x12\x15\xf0\x4d\xcf\x2b\xde\x33"
+ "\x41\xbc\xe7\x85\x87\x22\xb7\x16"
+ "\x02\x1c\xd8\xa2\x0f\x1f\xa3\xe9"
+ "\xd8\x45\x48\xe7\xbe\x08\x4e\x4e"
+ "\x23\x79\x84\xdb\x40\x76\xf5\x13"
+ "\x78\x92\x4a\x2f\xf9\x1b\xf2\x80"
+ "\x25\x74\x51\x45\x9a\x77\x78\x97"
+ "\xd3\xe0\xc7\xc4\x35\x67\x2a\xe6"
+ "\xb3\x0d\x62\x9f\x8b",
+ .len = 189,
+ },
};
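
The .klen = 32 here is two 16-byte SM4 keys: XTS encrypts the sector IV with the second key to obtain the initial tweak, then multiplies the tweak by x in GF(2^128) for each successive block; the final 189-byte vector additionally exercises ciphertext stealing for the ragged tail. The per-block doubling step, in the little-endian convention XTS uses, looks like this sketch:

	#include <stdint.h>

	/*
	 * Multiply the XTS tweak by x in GF(2^128), little-endian layout,
	 * reduction polynomial x^128 + x^7 + x^2 + x + 1 (0x87).
	 */
	static void xts_gf128mul_x(uint8_t t[16])
	{
		uint8_t carry = 0;
		int i;

		for (i = 0; i < 16; i++) {
			uint8_t c = t[i] >> 7;

			t[i] = (uint8_t)(t[i] << 1) | carry;
			carry = c;
		}
		if (carry)
			t[0] ^= 0x87;
	}
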
static const struct aead_testvec sm4_gcm_tv_template[] = {
@@ -14913,6 +14050,298 @@ static const struct aead_testvec sm4_gcm_tv_template[] = {
"\x83\xDE\x35\x41\xE4\xC2\xB5\x81"
"\x77\xE0\x65\xA9\xBF\x7B\x62\xEC",
.clen = 80,
+ }, { /* Generated from AES-GCM test vectors */
+ .key = zeroed_string,
+ .klen = 16,
+ .ctext = "\x23\x2f\x0c\xfe\x30\x8b\x49\xea"
+ "\x6f\xc8\x82\x29\xb5\xdc\x85\x8d",
+ .clen = 16,
+ }, {
+ .key = zeroed_string,
+ .klen = 16,
+ .ptext = zeroed_string,
+ .plen = 16,
+ .ctext = "\x7d\xe2\xaa\x7f\x11\x10\x18\x82"
+ "\x18\x06\x3b\xe1\xbf\xeb\x6d\x89"
+ "\xb8\x51\xb5\xf3\x94\x93\x75\x2b"
+ "\xe5\x08\xf1\xbb\x44\x82\xc5\x57",
+ .clen = 32,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
+ .plen = 64,
+ .ctext = "\xe4\x11\x0f\xf1\xc1\x41\x97\xe6"
+ "\x76\x21\x6a\x33\x83\x10\x41\xeb"
+ "\x09\x58\x00\x11\x7b\xdc\x3f\x75"
+ "\x1a\x49\x6e\xfc\xf2\xbb\xdf\xdb"
+ "\x3a\x2e\x13\xfd\xc5\xc1\x9d\x07"
+ "\x1a\xe5\x48\x3f\xed\xde\x98\x5d"
+ "\x3f\x2d\x5b\x4e\xee\x0b\xb6\xdf"
+ "\xe3\x63\x36\x83\x23\xf7\x5b\x80"
+ "\x7d\xfe\x77\xef\x71\xb1\x5e\xc9"
+ "\x52\x6b\x09\xab\x84\x28\x4b\x8a",
+ .clen = 80,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39",
+ .plen = 60,
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = "\xe4\x11\x0f\xf1\xc1\x41\x97\xe6"
+ "\x76\x21\x6a\x33\x83\x10\x41\xeb"
+ "\x09\x58\x00\x11\x7b\xdc\x3f\x75"
+ "\x1a\x49\x6e\xfc\xf2\xbb\xdf\xdb"
+ "\x3a\x2e\x13\xfd\xc5\xc1\x9d\x07"
+ "\x1a\xe5\x48\x3f\xed\xde\x98\x5d"
+ "\x3f\x2d\x5b\x4e\xee\x0b\xb6\xdf"
+ "\xe3\x63\x36\x83"
+ "\x89\xf6\xba\x35\xb8\x18\xd3\xcc"
+ "\x38\x6c\x05\xb3\x8a\xcb\xc9\xde",
+ .clen = 76,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39",
+ .plen = 60,
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = "\xc1\x11\x44\x51\xd9\x25\x87\x5b"
+ "\x0f\xd9\x06\xf3\x33\x44\xbb\x87"
+ "\x8b\xa3\x77\xd2\x0c\x60\xfa\xcc"
+ "\x85\x50\x6f\x96\x0c\x54\x54\xc1"
+ "\x58\x04\x88\x6e\xf4\x26\x35\x7e"
+ "\x94\x80\x48\x6c\xf2\xf4\x88\x1f"
+ "\x19\x63\xea\xae\xba\x81\x1a\x5d"
+ "\x0e\x6f\x59\x08"
+ "\x33\xac\x5b\xa8\x19\x60\xdb\x1d"
+ "\xdd\x2e\x22\x2e\xe0\x87\x51\x5d",
+ .clen = 76,
+ }, {
+ .key = "\x8b\x32\xcf\xe7\x44\xed\x13\x59"
+ "\x04\x38\x77\xb0\xb9\xad\xb4\x38",
+ .klen = 16,
+ .iv = "\x00\xff\xff\xff\xff\x00\x00\xff"
+ "\xff\xff\x00\xff",
+ .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
+ "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
+ "\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
+ "\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
+ "\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
+ "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
+ "\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
+ "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
+ "\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
+ "\x00\xa4\x43\x54\x04\x54\x9b\x3b"
+ "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
+ "\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
+ "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82"
+ "\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
+ "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
+ "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
+ "\xad\x49\x3a\xae\x98\xce\xa6\x66"
+ "\x10\x30\x90\x8c\x55\x83\xd7\x7c"
+ "\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
+ "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
+ "\x57\xcc\x89\x09\x75\x9b\x78\x70"
+ "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
+ "\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
+ "\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
+ "\x82\xd9\xec\xa2\x87\x68\x55\xf9"
+ "\x8f\x9e\x73\x43\x47\x6a\x08\x36"
+ "\x93\x67\xa8\x2d\xde\xac\x41\xa9"
+ "\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
+ "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
+ "\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
+ "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
+ "\xab\x19\x37\xd9\xba\x76\x5e\xd2"
+ "\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
+ "\x02\x66\x49\xca\x7c\x91\x05\xf2"
+ "\x45\x36\x1e\xf5\x77\xad\x1f\x46"
+ "\xa8\x13\xfb\x63\xb6\x08\x99\x63"
+ "\x82\xa2\xed\xb3\xac\xdf\x43\x19"
+ "\x45\xea\x78\x73\xd9\xb7\x39\x11"
+ "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
+ "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
+ "\xa4\x47\x7d\x80\x20\x26\xfd\x63"
+ "\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
+ "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
+ "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
+ "\x54\x03\xa4\x09\x0c\x37\x7a\x15"
+ "\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
+ "\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
+ "\x03\x25\x3c\x8d\x48\x58\x71\x34"
+ "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
+ "\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
+ "\x80\x6f\x96\x0e\x05\x62\xc7\x78"
+ "\x87\x79\x60\x38\x46\xb4\x25\x57"
+ "\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
+ "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
+ "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
+ "\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
+ "\x09\xfc\x91\x96\x47\x87\x4f\x1a"
+ "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
+ "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
+ "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
+ "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
+ "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
+ "\x0b\x63\xde\x87\x42\x79\x8a\x68"
+ "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
+ "\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
+ "\xe9\x83\x84\xcb\x28\x69\x09\x69"
+ "\xce\x99\x46\x00\x54\xcb\xd8\x38"
+ "\xf9\x53\x4a\xbf\x31\xce\x57\x15"
+ "\x33\xfa\x96\x04\x33\x42\xe3\xc0"
+ "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
+ "\x19\x95\xd0\x0e\x82\x07\x63\xf9"
+ "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
+ "\xb5\x9f\x23\x28\x60\xe7\x20\x51"
+ "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
+ "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
+ "\x78\xc6\x91\x22\x40\x91\x80\xbe"
+ "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
+ "\x67\x10\xa4\x83\x98\x79\x23\xe7"
+ "\x92\xda\xa9\x22\x16\xb1\xe7\x78"
+ "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
+ "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
+ "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
+ "\x48\x11\x06\xbb\x2d\xf2\x63\x88"
+ "\x3f\x73\x09\xe2\x45\x56\x31\x51"
+ "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
+ "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
+ "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
+ "\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
+ "\xa4\x78\xdb\x74\x3d\x8b\xb5",
+ .plen = 719,
+ .ctext = "\xdc\xb1\x0f\x2a\xe8\x2d\x1c\x57"
+ "\xc4\x82\xfa\xd6\x87\xe6\x2f\x50"
+ "\xbd\x9e\x0a\x42\x31\xf2\xc7\xbb"
+ "\x21\x63\xa7\x05\x43\x33\xef\x33"
+ "\x5c\xd3\x47\x55\xce\x5c\xe4\xd4"
+ "\xe5\x07\x62\x22\xac\x01\xa8\x35"
+ "\x9c\x59\x34\x30\x8e\xff\x9f\xb4"
+ "\xd2\x4e\x74\x90\x64\xf2\x78\x5e"
+ "\x63\xb7\xc5\x08\x1b\x37\xa5\x9e"
+ "\xc0\xde\xff\xa9\x7f\x0b\xd3\x02"
+ "\x83\x6e\x33\xfa\x43\x11\xd3\xda"
+ "\x02\xcf\xcd\x4a\xc0\x78\x1f\x39"
+ "\x62\xcb\xa3\x95\x7e\x13\x92\x28"
+ "\xb2\xc4\x7a\xba\xd1\xc6\xf6\x1f"
+ "\xda\x0b\xf1\xd1\x99\x54\xd8\x3b"
+ "\x16\xf8\xe6\x97\x1e\xa7\xcf\x49"
+ "\x69\x84\x01\x4c\xdc\x7a\x34\xff"
+ "\x01\x08\xa3\x0b\x39\xac\x21\x37"
+ "\xd8\xb4\x04\x19\x8b\x7a\x7d\x17"
+ "\x44\xd1\x18\xaf\x1f\xa9\x29\xfe"
+ "\xfa\x77\xe0\x40\x42\x0c\x79\xb7"
+ "\xc3\x15\x1b\xd9\x0c\x82\xfc\x16"
+ "\x70\xd6\x2a\xe9\x94\x72\xc5\xa5"
+ "\x8a\x58\xbc\xfa\xe0\x88\x39\x4a"
+ "\x80\xe8\xec\xaf\x60\xac\xe7\xf8"
+ "\x9c\xf0\xfc\x61\x39\x07\x98\x6b"
+ "\x88\xe3\x98\x22\x28\x18\x4a\x2d"
+ "\x25\xef\x10\xe3\x83\x66\x3f\xfd"
+ "\xc7\x0b\xa3\xfd\x97\xa9\xf4\xbd"
+ "\xd8\x2a\xee\x4a\x50\xad\xcc\xb5"
+ "\xc7\xab\xb8\x79\x9c\xd1\xf1\x27"
+ "\x08\xf5\xf5\xe8\x1b\x66\xce\x41"
+ "\x56\x60\x94\x86\xf0\x78\xc2\xfa"
+ "\x5b\x63\x40\xb1\xd1\x1a\x38\x69"
+ "\x0b\x8c\xb2\xf5\xa2\xbe\x90\x9d"
+ "\x46\x23\x79\x8b\x3b\x4a\xf4\xbb"
+ "\x55\xf7\x58\x9d\xaf\x59\xff\x74"
+ "\xf3\xb9\xc4\x26\xb1\xf8\xe1\x28"
+ "\x8b\x5e\x8f\x6d\x64\xe7\xe8\x63"
+ "\xd2\x9e\xcb\xee\xae\x19\x04\x1d"
+ "\x05\xf0\x9d\x99\x7b\x33\x33\xae"
+ "\x6e\xe5\x09\xdd\x67\x51\xc4\xc8"
+ "\x6a\xc7\x36\x35\xc9\x93\x76\xa1"
+ "\xa8\x1c\xfa\x75\x92\x34\x0e\x7d"
+ "\x3d\x1d\xef\x00\xfd\xa5\x25\x12"
+ "\x7c\x91\x21\x41\xcc\x50\x47\xa9"
+ "\x22\x50\x24\x96\x34\x79\x3d\xe8"
+ "\x3f\xa0\x56\xaf\x98\x53\x55\xc3"
+ "\x46\x1b\x17\x54\xb8\xb0\xb7\xe0"
+ "\xe0\xab\x47\x6f\x06\xda\xcc\x75"
+ "\xa7\x96\xb7\x92\xf3\xa0\x5f\xe6"
+ "\xba\x97\xe3\x2f\x97\x05\xb2\x99"
+ "\xa0\x09\x10\x98\x9c\xd3\x2e\xd1"
+ "\x7e\x2a\x30\x54\x3c\xb9\x33\xe3"
+ "\xf2\xaf\xd3\xa5\xee\xd0\x0b\x8a"
+ "\x19\x54\x0f\x02\x51\x1f\x91\xdf"
+ "\x71\x9c\xad\x77\x35\x28\x55\x6d"
+ "\xcd\x7a\xd9\xa3\x41\x98\x6b\x37"
+ "\x19\x0f\xbe\xae\x69\xb2\x25\x01"
+ "\xee\x0e\x51\x4b\x53\xea\x0f\x5f"
+ "\x85\x74\x79\x36\x32\x0a\x2a\x40"
+ "\xad\x6b\x78\x41\x54\x99\xe9\xc1"
+ "\x2b\x6c\x9b\x42\x21\xef\xe2\x50"
+ "\x56\x8d\x78\xdf\x58\xbe\x0a\x0f"
+ "\xfc\xfc\x0d\x2e\xd0\xcb\xa6\x0a"
+ "\xa8\xd9\x1e\xa9\xd4\x7c\x99\x88"
+ "\xcf\x11\xad\x1c\xd3\x04\x63\x55"
+ "\xef\x85\x0b\x69\xa1\x40\xf1\x75"
+ "\x24\xf4\xe5\x2c\xd4\x7a\x24\x50"
+ "\x8f\xa2\x71\xc9\x92\x20\xcd\xcf"
+ "\xda\x40\xbe\xf6\xfe\x1a\xca\xc7"
+ "\x4a\x80\x45\x55\xcb\xdd\xb7\x01"
+ "\xb0\x8d\xcb\xd2\xae\xbd\xa4\xd0"
+ "\x5c\x10\x05\x66\x7b\xd4\xff\xd9"
+ "\xc4\x23\x9d\x8d\x6b\x24\xf8\x3f"
+ "\x73\x4d\x5c\x2b\x33\x4c\x5e\x63"
+ "\x74\x6d\x03\xa1\x7a\x35\x65\x17"
+ "\x38\x7f\x3b\xc1\x69\xcf\x61\x34"
+ "\x30\x21\xaf\x97\x47\x12\x3f\xa1"
+ "\xa7\x50\xc5\x87\xfb\x3f\x70\x32"
+ "\x86\x17\x5f\x25\xe4\x74\xc6\xd0"
+ "\x9b\x39\xe6\xe1\x5a\xec\x8f\x40"
+ "\xce\xcc\x37\x3b\xd8\x72\x1c\x31"
+ "\x75\xa4\xa6\x89\x8c\xdd\xd6\xd2"
+ "\x32\x3d\xe8\xc3\x54\xab\x1f\x35"
+ "\x52\xb4\x94\x81\xb0\x37\x3a\x03"
+ "\xbb\xb1\x99\x30\xa5\xf8\x21\xcd"
+ "\x93\x5d\xa7\x13\xed\xc7\x49\x09"
+ "\x70\xda\x08\x39\xaa\x15\x9e\x45"
+ "\x35\x2b\x0f\x5c\x8c\x8b\xc9"
+ "\xa8\xb8\x9f\xfd\x37\x36\x31\x7e"
+ "\x34\x4f\xc1\xc0\xca\x8a\x22\xfd",
+ .clen = 735,
}
};
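
Two conventions of aead_testvec are visible in the GCM vectors above: .ctext carries the ciphertext with the 16-byte tag appended, so .clen = .plen + 16, and the empty-plaintext entry's 16-byte .ctext is the tag alone. Comparing the two vectors that share key, IV and leading plaintext also shows that the first 60 ciphertext bytes are identical whether or not the 20-byte .assoc is present: in GCM the associated data never touches the CTR keystream, it only enters the tag. The standard GCM composition (stated here for reference, not taken from this file):

	/*
	 *   H  = E_K(0^128)
	 *   J0 = IV || 0^31 || 1               (96-bit IV, as in these vectors)
	 *   C  = CTR_K(inc32(J0), P)           (keystream independent of A)
	 *   T  = E_K(J0) XOR GHASH_H(A, C)     (length block folded into GHASH)
	 *
	 *   .ctext = C || T   =>   .clen = .plen + 16
	 */
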
@@ -14947,6 +14376,282 @@ static const struct aead_testvec sm4_ccm_tv_template[] = {
"\x16\x84\x2D\x4F\xA1\x86\xF5\x6A"
"\xB3\x32\x56\x97\x1F\xA1\x10\xF4",
.clen = 80,
+ }, { /* Generated from AES-CCM test vectors */
+ .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+ .klen = 16,
+ .iv = "\x01\x00\x00\x00\x03\x02\x01\x00"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
+ .alen = 8,
+ .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e",
+ .plen = 23,
+ .ctext = "\x7b\xff\x4a\x15\xf5\x73\xce\x82"
+ "\x6e\xc2\x31\x1d\xe2\x53\x02\xac"
+ "\xa4\x48\xf9\xe4\xf5\x1f\x81\x70"
+ "\x18\xbc\xb6\x84\x01\xb8\xae",
+ .clen = 31,
+ }, {
+ .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
+ "\x53\x14\x73\x66\x8d\x88\xf6\x80",
+ .klen = 16,
+ .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d"
+ "\x50\x20\xda\xe2\x00\x00\x00\x00",
+ .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
+ "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
+ "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
+ "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
+ .alen = 32,
+ .ctext = "\x23\x58\xce\xdc\x40\xb1\xcd\x92"
+ "\x47\x96\x59\xfc\x8a\x26\x4f\xcf",
+ .clen = 16,
+ }, {
+ .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
+ "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+ .klen = 16,
+ .iv = "\x03\xaf\x94\x87\x78\x35\x82\x81"
+ "\x7f\x88\x94\x68\x00\x00\x00\x00",
+ .alen = 0,
+ .ptext = "\x00",
+ .plen = 0,
+ .ctext = "\x72\x7e\xf5\xd6\x39\x7a\x2b\x43",
+ .clen = 8,
+ }, {
+ .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
+ "\xa4\x48\x93\x39\x26\x71\x4a\xc6",
+ .klen = 16,
+ .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+ "\x57\xba\xfd\x9e\x00\x00\x00\x00",
+ .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
+ "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
+ "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
+ "\x11\xc4\xc6\xdb\x00\x56\x36\x61",
+ .alen = 32,
+ .ptext = "\x00",
+ .plen = 0,
+ .ctext = "\xb0\x9d\xc6\xfb\x7d\xb5\xa1\x0e",
+ .clen = 8,
+ }, {
+ .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+ "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b",
+ .klen = 16,
+ .iv = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f"
+ "\x44\x89\x40\x7b\x00\x00\x00\x00",
+ .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
+ "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
+ "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
+ "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
+ .alen = 32,
+ .ptext = "\xc2\x54\xc8\xde\x78\x87\x77\x40"
+ "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
+ "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
+ "\x04\x49\x3b\x19\x93\x57\x25\x5d",
+ .plen = 32,
+ .ctext = "\xc9\xae\xef\x1d\xf3\x2c\xd3\x38"
+ "\xc9\x7f\x7e\x28\xe8\xaa\xb3\x60"
+ "\x49\xdc\x66\xca\x7b\x3d\xe0\x3c"
+ "\xcb\x45\x9c\x1b\xb2\xbe\x07\x90"
+ "\x87\xa6\x6b\x89\x0d\x0f\x90\xaa"
+ "\x7d\xf6\x5a\x9a\x68\x2b\x81\x92",
+ .clen = 48,
+ }, {
+ .key = "\x8b\x32\xcf\xe7\x44\xed\x13\x59"
+ "\x04\x38\x77\xb0\xb9\xad\xb4\x38",
+ .klen = 16,
+ .iv = "\x02\xff\xff\xff\xff\x00\x00\xff"
+ "\xff\xff\x00\xff\xff\x00\x00\x00",
+ .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
+ "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
+ "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
+ "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe"
+ "\xc8\xf3\x5c\x52\x10\x63",
+ .alen = 38,
+ .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
+ "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
+ "\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
+ "\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
+ "\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
+ "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
+ "\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
+ "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
+ "\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
+ "\x00\xa4\x43\x54\x04\x54\x9b\x3b"
+ "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
+ "\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
+ "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82"
+ "\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
+ "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
+ "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
+ "\xad\x49\x3a\xae\x98\xce\xa6\x66"
+ "\x10\x30\x90\x8c\x55\x83\xd7\x7c"
+ "\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
+ "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
+ "\x57\xcc\x89\x09\x75\x9b\x78\x70"
+ "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
+ "\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
+ "\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
+ "\x82\xd9\xec\xa2\x87\x68\x55\xf9"
+ "\x8f\x9e\x73\x43\x47\x6a\x08\x36"
+ "\x93\x67\xa8\x2d\xde\xac\x41\xa9"
+ "\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
+ "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
+ "\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
+ "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
+ "\xab\x19\x37\xd9\xba\x76\x5e\xd2"
+ "\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
+ "\x02\x66\x49\xca\x7c\x91\x05\xf2"
+ "\x45\x36\x1e\xf5\x77\xad\x1f\x46"
+ "\xa8\x13\xfb\x63\xb6\x08\x99\x63"
+ "\x82\xa2\xed\xb3\xac\xdf\x43\x19"
+ "\x45\xea\x78\x73\xd9\xb7\x39\x11"
+ "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
+ "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
+ "\xa4\x47\x7d\x80\x20\x26\xfd\x63"
+ "\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
+ "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
+ "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
+ "\x54\x03\xa4\x09\x0c\x37\x7a\x15"
+ "\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
+ "\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
+ "\x03\x25\x3c\x8d\x48\x58\x71\x34"
+ "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
+ "\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
+ "\x80\x6f\x96\x0e\x05\x62\xc7\x78"
+ "\x87\x79\x60\x38\x46\xb4\x25\x57"
+ "\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
+ "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
+ "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
+ "\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
+ "\x09\xfc\x91\x96\x47\x87\x4f\x1a"
+ "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
+ "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
+ "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
+ "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
+ "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
+ "\x0b\x63\xde\x87\x42\x79\x8a\x68"
+ "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
+ "\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
+ "\xe9\x83\x84\xcb\x28\x69\x09\x69"
+ "\xce\x99\x46\x00\x54\xcb\xd8\x38"
+ "\xf9\x53\x4a\xbf\x31\xce\x57\x15"
+ "\x33\xfa\x96\x04\x33\x42\xe3\xc0"
+ "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
+ "\x19\x95\xd0\x0e\x82\x07\x63\xf9"
+ "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
+ "\xb5\x9f\x23\x28\x60\xe7\x20\x51"
+ "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
+ "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
+ "\x78\xc6\x91\x22\x40\x91\x80\xbe"
+ "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
+ "\x67\x10\xa4\x83\x98\x79\x23\xe7"
+ "\x92\xda\xa9\x22\x16\xb1\xe7\x78"
+ "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
+ "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
+ "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
+ "\x48\x11\x06\xbb\x2d\xf2\x63\x88"
+ "\x3f\x73\x09\xe2\x45\x56\x31\x51"
+ "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
+ "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
+ "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
+ "\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
+ "\xa4\x78\xdb\x74\x3d\x8b\xb5",
+ .plen = 719,
+ .ctext = "\xc5\x50\x85\x02\x72\xa8\xb3\x62"
+ "\xf9\xcd\x77\x7b\x43\xa5\x04\x70"
+ "\x68\x40\x57\x21\x1c\xfe\xef\x05"
+ "\x4d\xb8\x44\xba\x59\xea\x62\x32"
+ "\xcb\x6b\x6a\x39\x9b\xf3\xe5\xa4"
+ "\x36\x38\xde\x7d\xcf\xb6\xcd\xe3"
+ "\x89\xbf\x37\xc9\x96\x3c\x70\x10"
+ "\x92\x47\xcc\xac\x6f\xf8\x55\x9a"
+ "\x26\x43\x34\xb4\x92\x7d\x68\xfc"
+ "\x60\x37\x74\x2a\x55\xba\xc7\xd7"
+ "\x98\x69\xb7\xcf\x42\xfd\xb2\x10"
+ "\xa0\x59\xe1\x2c\x73\x66\x12\x97"
+ "\x85\x8b\x28\xcc\x29\x02\x15\x89"
+ "\x23\xd3\x32\x92\x87\x57\x09\x13"
+ "\x04\x7e\x8b\x6c\x3a\xc1\x4e\x6c"
+ "\xe1\x9f\xc8\xcc\x47\x9c\xd8\x10"
+ "\xf4\xb7\x5c\x30\x7a\x8b\x0f\x01"
+ "\x52\x38\x02\x92\x99\xac\x03\x90"
+ "\x18\x32\x2d\x21\x6a\x0a\x2a\xe7"
+ "\xc2\xcc\x15\x84\x4e\x2b\x0b\x3a"
+ "\x4c\xdc\xb0\x6b\x10\xd1\x27\x10"
+ "\xf0\x4a\x5c\x43\xa0\x34\x34\x59"
+ "\x47\x43\x48\xcb\x69\xa7\xff\x52"
+ "\xb8\xca\x23\x09\x07\xd7\xc5\xe4"
+ "\x2a\x4f\x99\xd5\x83\x36\x2a\x2d"
+ "\x59\xd0\xca\xb0\xfa\x40\x8c\xab"
+ "\xdf\x69\x08\xd9\x79\x1d\xde\xa8"
+ "\x0b\x34\x74\x4d\xf5\xa0\x4c\x81"
+ "\x7f\x93\x06\x40\x24\xfe\x7d\xcd"
+ "\xe4\xfe\xf8\xf8\x30\xce\xd0\x5d"
+ "\x70\xfd\x0d\x5a\x78\x85\x74\x2d"
+ "\xe4\xb5\x40\x18\x99\x11\xe4\x6a"
+ "\xdf\xfa\x4f\x25\x2c\xde\x15\xb7"
+ "\x12\xd8\xc6\x90\x0d\x0f\xc9\xfb"
+ "\x21\xf1\xed\xfe\x98\xe1\x03\xe2"
+ "\x5c\xef\xb6\xc7\x87\x77\x0e\xcd"
+ "\xff\x78\x94\xc9\xbe\xd3\x47\xf7"
+ "\x8d\x37\x48\x01\x42\xe2\x17\x96"
+ "\xfc\xc0\xcb\x7b\x7b\x57\xaf\x3b"
+ "\xc9\xd0\x94\xce\x5e\x1b\xa9\x47"
+ "\x02\x4d\x74\xcc\x45\x1d\xd3\x2d"
+ "\x5f\x4f\x7f\xf2\x4b\xf9\x59\xee"
+ "\x9e\x9e\xb9\x95\x29\x19\xd1\x5f"
+ "\x72\xab\x8d\xf1\x28\xd1\x1c\xae"
+ "\xc2\xba\xf7\x22\x84\x2c\x83\x51"
+ "\x03\xad\xa3\xef\x81\xa7\xdc\xf1"
+ "\x44\x51\x50\x96\x70\xd1\xe5\x47"
+ "\x57\xf9\x30\x90\xe4\xbf\xfc\x75"
+ "\x14\xaa\x4d\xb7\xb1\xe7\x79\x33"
+ "\x43\xc2\x5c\xc1\xbc\x09\x92\x0f"
+ "\xa7\xaf\x68\x51\x51\xec\x0b\xc3"
+ "\x3d\x2b\x94\x30\x45\x29\x1b\x9e"
+ "\x70\x56\xf8\xd6\x67\x2d\x39\x3b"
+ "\x3c\xd2\xd0\xd3\xdc\x7d\x84\xe9"
+ "\x06\x31\x98\xa6\x5c\xbf\x10\x58"
+ "\xce\xbb\xa7\xe1\x65\x7e\x51\x87"
+ "\x70\x46\xb4\x7f\xf9\xec\x92\x1c"
+ "\x9b\x24\x49\xc1\x04\xbe\x1c\x5f"
+ "\xcc\xb3\x33\x8c\xad\xe7\xdc\x32"
+ "\x54\xa2\x0d\x83\x0f\x3c\x12\x5d"
+ "\x71\xe3\x9c\xae\x71\xa3\x2a\x10"
+ "\xc5\x91\xb4\x73\x96\x60\xdb\x5d"
+ "\x1f\xd5\x9a\xd2\x69\xc3\xd7\x4b"
+ "\xa2\x66\x81\x96\x4a\xaa\x02\xd6"
+ "\xd5\x44\x9b\x42\x3a\x15\x5f\xe7"
+ "\x4d\x7c\xf6\x71\x4a\xea\xe8\x43"
+ "\xd7\x68\xe4\xbc\x05\x87\x49\x05"
+ "\x3b\x47\xb2\x6d\x5f\xd1\x11\xa6"
+ "\x58\xd4\xa2\x45\xec\xb5\x54\x55"
+ "\xd3\xd6\xd2\x6a\x8b\x21\x9e\x2c"
+ "\xf1\x27\x4b\x5b\xe3\xff\xe0\xfd"
+ "\x4b\xf1\xe7\xe2\x84\xf2\x17\x37"
+ "\x11\x68\xc4\x92\x4b\x6b\xef\x8e"
+ "\x75\xf5\xc2\x7d\x5c\xe9\x7c\xfc"
+ "\x2b\x00\x33\x0e\x7d\x69\xd8\xd4"
+ "\x9b\xa8\x38\x54\x7e\x6d\x23\x51"
+ "\x2c\xd6\xc4\x58\x23\x1c\x22\x2a"
+ "\x59\xc5\x9b\xec\x9d\xbf\x03\x0f"
+ "\xb3\xdd\xba\x02\x22\xa0\x34\x37"
+ "\x19\x56\xc2\x5b\x32\x1d\x1e\x66"
+ "\x68\xf4\x47\x05\x04\x18\xa7\x28"
+ "\x80\xf2\xc7\x99\xed\x1e\x72\x48"
+ "\x8f\x97\x5d\xb3\x74\x42\xfd\x0c"
+ "\x0f\x5f\x29\x0c\xf1\x35\x22\x90"
+ "\xd6\x7c\xb8\xa3\x2a\x89\x38\x71"
+ "\xe9\x7a\x55\x3c\x3b\xf2\x6e\x1a"
+ "\x22\x8f\x07\x81\xc1\xe1\xf1\x76"
+ "\x2a\x75\xab\x86\xc4\xcc\x52\x59"
+ "\x83\x19\x5e\xb3\x53\xe2\x81\xdf"
+ "\xe6\x15\xb3\xba\x0c\x0e\xba"
+ "\xa9\x2c\xed\x51\xd5\x06\xc8\xc6"
+ "\x4b\x9f\x5d\x1b\x61\x31\xad\xf4",
+ .clen = 735,
}
};
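(Aside, not part of the patch: every SM4-CCM entry above obeys clen = plen + 16, since the ciphertext carries a trailing 16-byte authentication tag, e.g. 735 = 719 + 16 in the last vector. A minimal sketch of replaying one such vector through the kernel AEAD API could look as follows, assuming the generic CCM template instantiates over SM4 as "ccm(sm4)"; the helper name and the flat assoc||text buffer layout are illustrative only.)

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: run one encryption vector through "ccm(sm4)".
 * On entry buf holds assoc || ptext; on success it holds assoc ||
 * ctext, where ctext is plen bytes of ciphertext plus the 16-byte tag.
 */
static int sm4_ccm_encrypt_vector(const u8 *key, unsigned int klen,
				  u8 *iv, u8 *buf,
				  unsigned int alen, unsigned int plen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("ccm(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, klen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* One flat buffer: alen bytes of AD, then plen + 16 for text/tag. */
	sg_init_one(&sg, buf, alen + plen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, alen);
	aead_request_set_crypt(req, &sg, &sg, plen, iv);
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}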
@@ -15030,6 +14735,68 @@ static const struct hash_testvec sm4_cmac128_tv_template[] = {
}
};
+static const struct hash_testvec sm4_xcbc128_tv_template[] = {
+ { /* Generated from AES-XCBC128 test vectors */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = zeroed_string,
+ .digest = "\xa9\x9a\x5c\x44\xe2\x34\xee\x2c"
+ "\x9b\xe4\x9d\xca\x64\xb0\xa5\xc4",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = "\x00\x01\x02",
+ .digest = "\x17\x27\x62\xf3\x8b\x88\x1d\xc0"
+ "\x97\x35\x9c\x3e\x9f\x27\xb7\x83",
+ .psize = 3,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .digest = "\xda\x45\xd1\xac\xec\x4d\xab\x46"
+ "\xdd\x59\xe0\x44\xff\x59\xd5\xfc",
+ .psize = 16,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13",
+ .digest = "\xbe\x24\x5d\x81\x8c\x8a\x10\xa4"
+ "\x8e\xc2\x16\xfa\xa4\x83\xc9\x2a",
+ .psize = 20,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .digest = "\x91\x82\x31\x56\xd5\x77\xa4\xc5"
+ "\x88\x2d\xce\x3a\x87\x5e\xbd\xba",
+ .psize = 32,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21",
+ .digest = "\x2a\xae\xa5\x24\x0c\x12\x9f\x5f"
+ "\x55\xfb\xae\x35\x13\x0d\x22\x2d",
+ .psize = 34,
+ .ksize = 16,
+ }
+};
+
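(Aside, not part of the patch: hash_testvec entries like the ones above are exercised through the kernel's synchronous hash (shash) interface. A minimal sketch of digesting a single vector might look like this, assuming the generic XCBC template instantiates over SM4 as "xcbc(sm4)"; the helper name is hypothetical.)

#include <crypto/hash.h>
#include <linux/err.h>

/* Hypothetical helper: digest one vector with the (assumed) "xcbc(sm4)"
 * instantiation of the generic XCBC template; out receives 16 bytes.
 */
static int sm4_xcbc_digest_vector(const u8 *key, unsigned int ksize,
				  const u8 *plaintext, unsigned int psize,
				  u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("xcbc(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, ksize);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, plaintext, psize, out);
	}

	crypto_free_shash(tfm);
	return err;
}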
/* Cast6 test vectors from RFC 2612 */
static const struct cipher_testvec cast6_tv_template[] = {
{
@@ -16167,104 +15934,6 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
},
};
-static const struct cipher_testvec aes_cfb_tv_template[] = {
- { /* From NIST SP800-38A */
- .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
- "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
- "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
- "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
- "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
- "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
- "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
- "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
- "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
- "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
- "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
- "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
- "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
- "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
- "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
- "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
- .len = 64,
- }, {
- .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
- "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
- "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
- .klen = 24,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
- "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
- "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
- "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
- "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
- "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
- "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
- "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
- "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
- "\x67\xce\x7f\x7f\x81\x17\x36\x21"
- "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
- "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
- "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
- "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
- "\x42\xae\x8f\xba\x58\x4b\x09\xff",
- .len = 64,
- }, {
- .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
- "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
- "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
- "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
- .klen = 32,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
- "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
- "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
- "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
- "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
- "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
- "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
- "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
- "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
- "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
- "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
- "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
- "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
- "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
- "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
- .len = 64,
- }, { /* > 16 bytes, not a multiple of 16 bytes */
- .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
- "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
- "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
- "\xae",
- .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
- "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
- "\xc8",
- .len = 17,
- }, { /* < 16 bytes */
- .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
- "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
- .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
- .len = 7,
- },
-};
-
static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -19955,55 +19624,6 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = {
},
};
-static const struct cipher_testvec aes_ofb_tv_template[] = {
- { /* From NIST Special Publication 800-38A, Appendix F.5 */
- .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
- "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07\x08"
- "\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
- "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
- "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
- "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
- "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
- "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
- "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
- "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
- "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
- "\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5"
- "\x3c\x52\xda\xc5\x4e\xd8\x25"
- "\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43"
- "\x44\xf7\xa8\x22\x60\xed\xcc"
- "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
- "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
- .len = 64,
- }, { /* > 16 bytes, not a multiple of 16 bytes */
- .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
- "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
- "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
- "\xae",
- .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
- "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
- "\x77",
- .len = 17,
- }, { /* < 16 bytes */
- .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
- "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
- .klen = 16,
- .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
- .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
- .len = 7,
- }
-};
-
static const struct aead_testvec aes_gcm_tv_template[] = {
{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
.key = zeroed_string,
@@ -22865,136 +22485,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
};
/*
- * All key wrapping test vectors taken from
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip
- *
- * Note: as documented in keywrap.c, the ivout for encryption is the first
- * semiblock of the ciphertext from the test vector. For decryption, iv is
- * the first semiblock of the ciphertext.
- */
-static const struct cipher_testvec aes_kw_tv_template[] = {
- {
- .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
- "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
- .klen = 16,
- .ptext = "\x42\x13\x6d\x3c\x38\x4a\x3e\xea"
- "\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f",
- .ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3"
- "\xf5\x6f\xab\xea\x25\x48\xf5\xfb",
- .len = 16,
- .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
- .generates_iv = true,
- }, {
- .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
- "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
- "\x03\x86\xf9\x32\x78\x6e\xf7\x96"
- "\x76\xfa\xfb\x90\xb8\x26\x3c\x5f",
- .klen = 32,
- .ptext = "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa"
- "\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa",
- .ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15"
- "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43",
- .len = 16,
- .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
- .generates_iv = true,
- },
-};
-
-/*
- * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
- * test vectors, taken from Appendix B.2.9 and B.2.10:
- * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
- * Only AES-128 is supported at this time.
- */
-static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
- {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9",
- .dtlen = 16,
- .v = "\x80\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55"
- "\x84\x79\x66\x85\xc1\x2f\x76\x41",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfa",
- .dtlen = 16,
- .v = "\xc0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x7c\x22\x2c\xf4\xca\x8f\xa2\x4c"
- "\x1c\x9c\xb6\x41\xa9\xf3\x22\x0d",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfb",
- .dtlen = 16,
- .v = "\xe0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x8a\xaa\x00\x39\x66\x67\x5b\xe5"
- "\x29\x14\x28\x81\xa9\x4d\x4e\xc7",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfc",
- .dtlen = 16,
- .v = "\xf0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x88\xdd\xa4\x56\x30\x24\x23\xe5"
- "\xf6\x9d\xa5\x7e\x7b\x95\xc7\x3a",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfd",
- .dtlen = 16,
- .v = "\xf8\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x05\x25\x92\x46\x61\x79\xd2\xcb"
- "\x78\xc4\x0b\x14\x0a\x5a\x9a\xc8",
- .rlen = 16,
- .loops = 1,
- }, { /* Monte Carlo Test */
- .key = "\x9f\x5b\x51\x20\x0b\xf3\x34\xb5"
- "\xd8\x2b\xe8\xc3\x72\x55\xc8\x48",
- .klen = 16,
- .dt = "\x63\x76\xbb\xe5\x29\x02\xba\x3b"
- "\x67\xc9\x25\xfa\x70\x1f\x11\xac",
- .dtlen = 16,
- .v = "\x57\x2c\x8e\x76\x87\x26\x47\x97"
- "\x7e\x74\xfb\xdd\xc4\x95\x01\xd1",
- .vlen = 16,
- .result = "\x48\xe9\xbd\x0d\x06\xee\x18\xfb"
- "\xe4\x57\x90\xd5\xc3\xfc\x9b\x73",
- .rlen = 16,
- .loops = 10000,
- },
-};
-
-/*
* SP800-90A DRBG Test vectors from
* http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
*
@@ -24688,6 +24178,54 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = {
/*
* CAMELLIA test vectors.
*/
+static const struct hash_testvec camellia_cmac128_tv_template[] = {
+ { /* From draft-kato-ipsec-camellia-cmac96and128-01 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = zeroed_string,
+ .digest = "\xba\x92\x57\x82\xaa\xa1\xf5\xd9"
+ "\xa0\x0f\x89\x64\x80\x94\xfc\x71",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+ .digest = "\x6d\x96\x28\x54\xa3\xb9\xfd\xa5"
+ "\x6d\x7d\x45\xa9\x5e\xe1\x79\x93",
+ .psize = 16,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11",
+ .digest = "\x5c\x18\xd1\x19\xcc\xd6\x76\x61"
+ "\x44\xac\x18\x66\x13\x1d\x9f\x22",
+ .psize = 40,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .digest = "\xc2\x69\x9a\x6e\xba\x55\xce\x9d"
+ "\x93\x9a\x8a\x4e\x19\x46\x6e\xe9",
+ .psize = 64,
+ .ksize = 16,
+ }
+};
+
static const struct cipher_testvec camellia_tv_template[] = {
{
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
@@ -28405,909 +27942,6 @@ static const struct cipher_testvec aria_ctr_tv_template[] = {
}
};
-static const struct cipher_testvec aria_cfb_tv_template[] = {
- {
- .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23"
- "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1",
- .klen = 16,
- .iv = "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0"
- "\xfd\xab\x56\xa6\x6e\xda\x7c\x57",
- .ptext = "\x36\x36\x89\x09\xcd\xa8\xd3\x91"
- "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0",
- .ctext = "\x19\x28\xb5\xf2\x1c\xbc\xf8\xaf"
- "\xb9\xae\x1b\x23\x4f\xe1\x6e\x40",
- }, {
- .key = "\x51\xe3\x8c\xe9\x76\xcd\xff\x37"
- "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe",
- .klen = 16,
- .iv = "\x3d\x2d\x85\x75\x6e\x18\x8a\x52"
- "\x53\x39\xfc\xc1\xf5\xc0\x56\x22",
- .ptext = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93"
- "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e"
- "\xfa\x3f\x70\x52\xfb\x04\x0e\xed"
- "\x0e\x60\x75\x84\x21\xdf\x13\xa1",
- .ctext = "\x3f\x8c\xa9\x19\xd6\xb4\xfb\xed"
- "\x9c\x6d\xaa\x1b\xe1\xc1\xe6\xa8"
- "\x47\x35\x7d\xa3\x96\x7d\x53\x60"
- "\xa9\x33\x9c\x34\xae\x7d\x7c\x74",
- .len = 32,
- }, {
- .key = "\x26\xf8\x8c\x26\x0a\x37\x51\x8f"
- "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d",
- .klen = 16,
- .iv = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea"
- "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4",
- .ptext = "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0"
- "\xef\x3e\x5f\x46\x62\x88\xd5\x26"
- "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2"
- "\x39\x56\x34\x63\x2c\xc5\x51\x13"
- "\x48\x29\x3a\x58\xbe\x41\xc5\x80"
- "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e",
- .ctext = "\x28\xd8\xa7\xf8\x74\x98\x00\xfc"
- "\xd6\x48\xad\xbd\xbe\x3f\x0e\x7b"
- "\xa3\xec\x03\x6a\xfb\xc9\x01\x83"
- "\xb3\x2f\xda\x5e\x66\xa0\xc3\xec"
- "\xe9\xd4\x72\x2a\xa2\x90\x41\xcf"
- "\xde\x30\x79\xc3\x82\x10\x51\xe1",
- .len = 48,
- }, {
- .key = "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b"
- "\x77\xb5\xca\x90\xda\x1d\x22\x17",
- .klen = 16,
- .iv = "\xd9\xa0\x57\x80\xc8\x96\x70\x86"
- "\x07\x2c\xf4\x61\x79\x09\x01\x8f",
- .ptext = "\x37\x32\x98\xd4\x86\x2b\x3b\x80"
- "\x07\x60\xba\xf0\x2e\xc3\x4a\x57"
- "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a"
- "\xe6\x08\xf0\xbe\x77\xd1\x62\x40"
- "\xa0\x82\x09\x60\x47\xbb\x16\x56"
- "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c"
- "\x05\x32\x63\x1a\xc4\x46\x6f\x55"
- "\x32\xde\x41\x5a\xf7\x52\xd7\xfa",
- .ctext = "\x29\x31\x55\xd2\xe5\x0b\x81\x39"
- "\xf9\xbc\x63\xe2\xfa\x26\x99\xde"
- "\x5c\xd3\x0a\x56\xe5\xfc\x83\xdd"
- "\xab\x26\x90\x7d\xa8\x0f\x01\xa6"
- "\x0e\x01\xdc\x1f\xfa\xa7\xdd\x09"
- "\xf9\xbf\x12\xf4\xc6\x9f\xbd\x57"
- "\x23\x68\x54\x0f\xe0\xcf\x1c\x6d"
- "\xe1\x5e\x0b\x4a\x1e\x71\x1d\xaa",
- .len = 64,
- }, {
- .key = "\x30\x9d\x59\x8d\x64\x76\xad\x37"
- "\xba\xbc\x46\x6a\x69\x17\x3c\xac",
- .klen = 16,
- .iv = "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e"
- "\x54\x74\x8f\x3d\xe2\xd6\x85\x44",
- .ptext = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa"
- "\xad\xfd\x32\x94\x1f\x56\x57\xd1"
- "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d"
- "\xb2\x92\x83\x70\x1e\xa3\x97\xa6"
- "\x65\x53\x39\xeb\x53\x8f\xb1\x38"
- "\x91\xac\x17\x11\x1c\x03\x69\x53"
- "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b"
- "\xb6\x02\xc4\xfa\x95\x01\x33\xa8"
- "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67"
- "\xce\x8f\x9f\xea\x46\x66\x99\xb8",
- .ctext = "\x38\xbc\xf5\x9d\x0e\x26\xa6\x18"
- "\x95\x0b\x23\x54\x09\xa1\xf9\x46"
- "\x7a\x31\xa0\xd7\x4a\xec\xb3\x10"
- "\x8a\x8e\x99\x78\x6c\x6e\x76\xf2"
- "\x63\x8a\x3b\x90\xaa\xd5\x64\x65"
- "\x5a\x52\xb0\x36\x4c\xce\xed\xc7"
- "\x51\x3c\x06\xb0\xee\x54\xec\x10"
- "\xc0\x5f\xfd\xa9\x44\x9a\x29\x32"
- "\x19\x79\x7d\x2b\x14\x26\x96\x13"
- "\x9d\xa5\x61\xbd\xb6\x72\x37\x26",
- .len = 80,
- }, {
- .key = "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf"
- "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3",
- .klen = 16,
- .iv = "\x17\x94\x77\x2f\x92\xb8\x87\xc2"
- "\xcc\x6f\x70\x26\x87\xc7\x10\x8a",
- .ptext = "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41"
- "\x4c\xf4\xd0\x34\xd0\x95\xab\xae"
- "\x82\x5c\xfd\xfa\x13\x86\x25\xce"
- "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50"
- "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a"
- "\xaf\x06\xea\xf4\x65\x59\xd6\xc2"
- "\x84\xa0\x53\x97\x61\x30\x70\x15"
- "\xac\x45\x8e\xe8\xeb\xa1\x72\x93"
- "\x26\x76\x98\x6f\xe4\x86\xca\xf0"
- "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95"
- "\x86\x26\x20\x0e\x62\xfe\x8f\x1e"
- "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda",
- .ctext = "\xdf\x79\x58\x30\x6f\x47\x12\x78"
- "\x04\xb2\x0b\x1a\x62\x22\xe2\x9f"
- "\xfe\x90\x50\x41\x1b\x6a\x6a\x9c"
- "\x4e\x77\x8f\xca\xd1\x68\x31\xcd"
- "\x41\x82\xa5\x5b\xc0\x08\x2b\x37"
- "\x62\xec\x95\xf1\x56\x12\x38\x66"
- "\x84\x82\x72\xda\x00\x21\x96\x82"
- "\x33\xd4\x99\xaa\xb9\xeb\xd5\xc3"
- "\x2b\xa8\xf7\xdc\x13\x0e\x21\x9f"
- "\x4b\xf9\x42\x58\xa8\x39\x10\xd5"
- "\x86\xa5\xc6\x78\x3b\x34\x05\x03"
- "\x54\x43\x2b\x80\xa9\x53\x4d\x0e",
- .len = 96,
- }, {
- .key = "\x6e\x49\x20\xd5\xb7\x01\x83\x4e"
- "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1",
- .klen = 16,
- .iv = "\xee\xb7\x0d\x65\x00\x38\xab\x71"
- "\x70\x6e\xb3\x97\x86\xd3\xcd\xad",
- .ptext = "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9"
- "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e"
- "\x25\x1b\xc2\xa6\x21\x25\xeb\x97"
- "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94"
- "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb"
- "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2"
- "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17"
- "\x58\x2b\x1d\x73\x9a\x9c\x63\x18"
- "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb"
- "\xc9\x9d\x79\x51\x34\x39\x4f\x07"
- "\xa2\x7c\x21\x04\x91\x3b\x79\x79"
- "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0"
- "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2"
- "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95",
- .ctext = "\xe4\x25\x0d\x22\xeb\xbe\x5e\x90"
- "\x01\xe5\xae\xc9\x94\xbd\x93\x89"
- "\x5e\x5a\x5a\x2f\xf6\xdf\xf8\x16"
- "\xd3\xb2\xed\x29\x51\xe2\x75\xb0"
- "\x1a\x48\xb5\xe6\xd3\x58\x40\xc7"
- "\x6f\x6f\xcf\x57\x82\x43\x5a\x36"
- "\xef\x27\xe1\x34\x85\x01\xec\x98"
- "\x00\xbd\x94\x6f\x12\x39\xa8\x13"
- "\xfe\x3c\x39\xc0\xc6\xe1\xcc\x05"
- "\x0e\xd5\xc9\xda\xbd\xdd\xdb\xaa"
- "\x5a\xaa\x8e\xe8\xa8\x0a\xc5\x18"
- "\xb4\x1d\x13\x81\xc9\xc4\xaa\x61"
- "\xa9\xbd\xaa\x03\x12\x93\xbb\xed"
- "\x0c\x6e\xbd\x1c\x05\x16\x8a\x59",
- .len = 112,
- }, {
- .key = "\xb6\x08\x1d\x31\xaf\xf4\x17\x46"
- "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15",
- .klen = 16,
- .iv = "\x0c\x85\x2f\x62\xe5\xf4\x35\x96"
- "\xb1\x9b\x5d\x00\x10\xe9\x70\x12",
- .ptext = "\x3a\x87\x7f\x67\xf1\x81\x7a\x05"
- "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e"
- "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41"
- "\x9c\x14\x44\x5a\xd5\x1c\x50\x08"
- "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e"
- "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3"
- "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b"
- "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe"
- "\x2f\x7e\x1c\x10\x81\x36\x2d\x79"
- "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c"
- "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda"
- "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d"
- "\x70\x86\xa5\x92\x9f\xee\xcd\x3f"
- "\x6a\x55\x84\x98\x28\x03\x02\xc2"
- "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8"
- "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c",
- .ctext = "\xa7\x4c\x96\x55\x7c\x07\xce\xb2"
- "\x6f\x63\x9f\xc6\x8b\x6f\xc6\x4a"
- "\x85\xf2\x4b\xdf\x62\x0c\x6c\x8d"
- "\x13\x5d\xd3\x40\x58\xa6\xf9\x03"
- "\xd9\xf2\x48\x4e\x12\x64\x9a\x55"
- "\xa2\xa3\xd0\x19\xe5\x5b\xaa\x62"
- "\x7b\xe9\x2a\x23\xab\xb5\xa6\xcf"
- "\x53\x59\x70\xc6\xb8\x92\x12\x3b"
- "\x93\x68\x24\xba\x7d\xd6\xc0\x5b"
- "\x06\x2e\x7f\x2e\x32\x5d\x42\x9c"
- "\x13\x8e\x92\x3c\x99\x20\x32\x2b"
- "\x4a\x41\xb2\x4a\x81\xe8\x6e\x7f"
- "\x5b\x8e\xca\x4d\xd7\x29\x96\xde"
- "\x30\x9c\xa6\x84\x90\xe7\xc2\xae"
- "\xf4\x7e\x73\x32\x4c\x25\xec\xef"
- "\x58\x69\x63\x3f\x4e\x71\x4b\x1c",
- .len = 128,
- }, {
- .key = "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17"
- "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c",
- .klen = 16,
- .iv = "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad"
- "\x8c\xaf\x36\x3d\xff\x29\x8b\x33",
- .ptext = "\x87\x96\x77\x1a\x10\x81\x63\x8a"
- "\x63\xde\x88\xa9\x9d\xa9\x01\xf2"
- "\xdf\xc9\x25\x35\x48\x3a\x15\xdf"
- "\x20\x6b\x91\x7c\x56\xe5\x10\x7a"
- "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f"
- "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd"
- "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8"
- "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a"
- "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e"
- "\xc0\x3b\x12\x28\xca\x26\x7b\xb3"
- "\x14\xc1\x7f\x66\xff\x3b\xa4\x80"
- "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a"
- "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf"
- "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff"
- "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd"
- "\xcd\x56\x02\x95\xc9\x54\x6e\x62"
- "\x6a\x97\x75\x1a\x21\x16\x46\xfb"
- "\xc2\xab\x62\x54\xef\xba\xae\x46",
- .ctext = "\x11\x7f\xea\x49\xaf\x24\x52\xa2"
- "\xde\x60\x99\x58\x23\xf9\x9e\x91"
- "\x94\x52\x31\xa3\x28\x07\x14\xad"
- "\x00\x24\x4a\x4a\xe7\x18\xd7\x24"
- "\xcc\x8b\x66\x53\x82\x65\x31\xa5"
- "\x54\x76\x59\x0b\x69\x6f\x90\x2c"
- "\x8d\xa5\x2b\x61\x05\x80\xfb\xe0"
- "\xf9\x6e\xaf\xb9\xc4\x15\x67\xcc"
- "\x15\xce\xa0\xc0\xf2\xae\xa6\x15"
- "\x24\x9a\xe5\xcb\x09\x42\xcf\x41"
- "\x95\xa4\x8d\xbf\xe8\xb8\x40\xcd"
- "\xb0\x33\x2c\xb3\xc4\xdd\xf9\x45"
- "\xda\xb2\xeb\xb3\xf8\xfa\x7f\xe3"
- "\xc0\x3a\x98\xe7\x17\x4a\x0c\x60"
- "\xb2\x22\xba\x3b\x21\x85\x27\x56"
- "\xe0\xb2\xf7\x2a\x59\xb1\x56\x20"
- "\x0b\xa9\x13\x73\xe0\x6f\x61\x32"
- "\xa5\x38\x14\xb3\xe3\xaa\x70\x44",
- .len = 144,
- }, {
- .key = "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9"
- "\x05\x26\x23\x81\x19\x27\xad\x7b",
- .klen = 16,
- .iv = "\x9c\x8b\xfb\x65\xa4\x61\xee\x69"
- "\x44\xbf\x59\xde\x03\x61\x11\x12",
- .ptext = "\x8d\x94\x48\x47\xa9\x52\x16\xfb"
- "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c"
- "\xb6\x09\x21\x12\x42\x98\x13\xa1"
- "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea"
- "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c"
- "\x66\xb8\x4d\x60\x67\x82\xcc\x8d"
- "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c"
- "\x54\x84\x2a\x06\xb5\xd1\x34\x57"
- "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33"
- "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97"
- "\x3e\xc6\xec\xaf\x74\xe8\x72\x91"
- "\xb2\xc6\x56\xb3\x23\x29\x43\xe0"
- "\xfb\xcc\x21\x38\x64\x78\x9e\x78"
- "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01"
- "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58"
- "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d"
- "\x90\xb7\x60\x78\xa8\x9f\x52\xe3"
- "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53"
- "\x42\x0e\x15\x31\xf6\x48\xa3\x0a"
- "\x20\xf0\x79\x67\xb1\x83\x26\x66",
- .ctext = "\x5b\xc0\xe8\x17\xa4\xf9\xea\xce"
- "\x9e\xf9\xe0\xb1\xac\x37\xe9\x41"
- "\xc8\x06\xf9\x1c\x1a\xfc\xe8\x7a"
- "\x38\xf2\x80\x66\xc2\x70\x59\x4e"
- "\xe0\x32\x5b\x27\x39\xf5\xfb\x03"
- "\xc8\xaf\xd6\x7e\x57\xc7\xc6\x71"
- "\xd9\xd0\x48\x39\xb1\x0d\xa8\x1a"
- "\x23\x8a\x3d\x05\xe2\x90\x7e\x18"
- "\xd7\x20\x04\x3b\x82\x76\x3f\xaa"
- "\xc2\x89\xb6\x9e\x14\x2f\x46\xcd"
- "\x51\x9b\xa8\x7b\x62\x7b\x9c\x17"
- "\xc4\xe1\x8b\x3f\xb5\x4d\xac\x66"
- "\x49\xf6\xb6\x4c\x3e\x16\x46\xb0"
- "\xca\x04\xef\x72\x5c\x03\x0a\xe5"
- "\x2f\x4e\x36\x38\x36\x9f\xf4\xe2"
- "\x81\x7a\x4c\xdf\x36\x27\xd5\x9d"
- "\x03\xad\x1d\x3a\xe9\x2a\x99\xb0"
- "\x2c\xba\x13\x75\xc8\x37\x97\x11"
- "\xf4\x15\x0f\xb7\x75\x26\xa1\x14"
- "\x79\xec\x1f\xab\xd2\x10\x8c\x5f",
- .len = 160,
- }, {
- .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23"
- "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1"
- "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0",
- .klen = 24,
- .iv = "\xfd\xab\x56\xa6\x6e\xda\x7c\x57"
- "\x36\x36\x89\x09\xcd\xa8\xd3\x91",
- .ptext = "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0"
- "\x51\xe3\x8c\xe9\x76\xcd\xff\x37",
- .ctext = "\xa4\x12\x2f\xc4\xf0\x6d\xd9\x46"
- "\xe4\xe6\xd1\x0b\x6d\x14\xf0\x8f",
- .len = 16,
- }, {
- .key = "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe"
- "\x3d\x2d\x85\x75\x6e\x18\x8a\x52"
- "\x53\x39\xfc\xc1\xf5\xc0\x56\x22",
- .klen = 24,
- .iv = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93"
- "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e",
- .ptext = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed"
- "\x0e\x60\x75\x84\x21\xdf\x13\xa1"
- "\x26\xf8\x8c\x26\x0a\x37\x51\x8f"
- "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d",
- .ctext = "\x80\x2b\xf0\x88\xb9\x4b\x8d\xf5"
- "\xc3\x0e\x15\x5b\xea\x5d\x5b\xa8"
- "\x52\xe7\x83\x3c\xa1\x51\x1c\x1f"
- "\x38\xd9\x7c\x88\x3c\x3a\xcd\x3e",
- .len = 32,
- }, {
- .key = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea"
- "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4"
- "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0",
- .klen = 24,
- .iv = "\xef\x3e\x5f\x46\x62\x88\xd5\x26"
- "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2",
- .ptext = "\x39\x56\x34\x63\x2c\xc5\x51\x13"
- "\x48\x29\x3a\x58\xbe\x41\xc5\x80"
- "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e"
- "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b"
- "\x77\xb5\xca\x90\xda\x1d\x22\x17"
- "\xd9\xa0\x57\x80\xc8\x96\x70\x86",
- .ctext = "\x65\x01\x3c\xb0\xac\x4c\x63\xb6"
- "\xe7\xf1\xf4\x61\x35\xf4\x36\xde"
- "\xeb\x0f\x8c\x34\xd1\x78\xb4\x00"
- "\xb2\xc1\x7c\x28\xb2\xb7\xbb\xa3"
- "\xc6\xb7\x27\xf7\x6d\x56\x79\xfa"
- "\x61\x57\xba\x30\x6f\x56\xe9\x8c",
- .len = 48,
- }, {
- .key = "\x07\x2c\xf4\x61\x79\x09\x01\x8f"
- "\x37\x32\x98\xd4\x86\x2b\x3b\x80"
- "\x07\x60\xba\xf0\x2e\xc3\x4a\x57",
- .klen = 24,
- .iv = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a"
- "\xe6\x08\xf0\xbe\x77\xd1\x62\x40",
- .ptext = "\xa0\x82\x09\x60\x47\xbb\x16\x56"
- "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c"
- "\x05\x32\x63\x1a\xc4\x46\x6f\x55"
- "\x32\xde\x41\x5a\xf7\x52\xd7\xfa"
- "\x30\x9d\x59\x8d\x64\x76\xad\x37"
- "\xba\xbc\x46\x6a\x69\x17\x3c\xac"
- "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e"
- "\x54\x74\x8f\x3d\xe2\xd6\x85\x44",
- .ctext = "\x5a\xfb\xb1\x2c\x6e\xe5\xb8\xe0"
- "\x80\xb6\x77\xa8\xfe\x10\x3a\x99"
- "\xbf\xc0\x2a\xfe\x6f\x38\xf2\x1d"
- "\x53\x6c\x05\x83\xb1\x13\x00\x87"
- "\x92\x92\x42\x70\xcf\x9f\xf7\x8f"
- "\x53\x55\x18\x6f\x35\x68\x35\x50"
- "\x3a\xc8\x45\x3e\xa3\xf1\x33\x2e"
- "\xa1\x65\x42\xe2\x6d\x31\x8c\x4b",
- .len = 64,
- }, {
- .key = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa"
- "\xad\xfd\x32\x94\x1f\x56\x57\xd1"
- "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d",
- .klen = 24,
- .iv = "\xb2\x92\x83\x70\x1e\xa3\x97\xa6"
- "\x65\x53\x39\xeb\x53\x8f\xb1\x38",
- .ptext = "\x91\xac\x17\x11\x1c\x03\x69\x53"
- "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b"
- "\xb6\x02\xc4\xfa\x95\x01\x33\xa8"
- "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67"
- "\xce\x8f\x9f\xea\x46\x66\x99\xb8"
- "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf"
- "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3"
- "\x17\x94\x77\x2f\x92\xb8\x87\xc2"
- "\xcc\x6f\x70\x26\x87\xc7\x10\x8a"
- "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41",
- .ctext = "\xc9\x5f\xe0\x60\x61\x38\x7e\x79"
- "\x52\x68\x64\x8f\x55\x9b\x6b\x72"
- "\xa5\x17\x61\xb7\xce\x02\xa9\xa4"
- "\x5c\x73\x45\x33\xd1\x07\x5e\xdc"
- "\xe5\xbe\xa7\xde\x69\xa0\x97\x98"
- "\x02\xef\xa4\x67\x51\x60\x69\x4f"
- "\x03\xf5\xa8\x5f\x03\x69\xbc\xc2"
- "\x34\x59\x7e\xd4\xd2\xb3\x32\x2f"
- "\x0c\xb4\x37\xca\xc4\xc7\x93\xf4"
- "\xa4\xab\x01\x3f\x91\x29\x55\x98",
- .len = 80,
- }, {
- .key = "\x4c\xf4\xd0\x34\xd0\x95\xab\xae"
- "\x82\x5c\xfd\xfa\x13\x86\x25\xce"
- "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50",
- .klen = 24,
- .iv = "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a"
- "\xaf\x06\xea\xf4\x65\x59\xd6\xc2",
- .ptext = "\x84\xa0\x53\x97\x61\x30\x70\x15"
- "\xac\x45\x8e\xe8\xeb\xa1\x72\x93"
- "\x26\x76\x98\x6f\xe4\x86\xca\xf0"
- "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95"
- "\x86\x26\x20\x0e\x62\xfe\x8f\x1e"
- "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda"
- "\x6e\x49\x20\xd5\xb7\x01\x83\x4e"
- "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1"
- "\xee\xb7\x0d\x65\x00\x38\xab\x71"
- "\x70\x6e\xb3\x97\x86\xd3\xcd\xad"
- "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9"
- "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e",
- .ctext = "\x03\x2c\x39\x24\x99\xb5\xf6\x79"
- "\x91\x89\xb7\xf8\x89\x68\x37\x9d"
- "\xa2\x80\x95\x74\x87\x64\xb9\xeb"
- "\x85\x28\x92\x9a\x6e\xd3\x3b\x50"
- "\x4c\x80\x5b\xe4\xf2\x7e\xda\x2a"
- "\xd4\xf8\xcb\xe3\x6f\xdf\xae\x0e"
- "\xc5\x6c\x0b\x49\x2e\x29\x1c\xf2"
- "\x3f\x44\x44\x12\x67\xa6\xff\x44"
- "\xe0\xec\xd8\xf7\x32\xde\x21\x15"
- "\xab\x8f\x98\x4d\xed\xb0\x42\xfd"
- "\x83\x94\xe2\xcc\x69\x6d\xe8\xdb"
- "\x62\x93\x1f\xd0\xf4\x8c\x62\xc0",
- .len = 96,
- }, {
- .key = "\x25\x1b\xc2\xa6\x21\x25\xeb\x97"
- "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94"
- "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb",
- .klen = 24,
- .iv = "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2"
- "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17",
- .ptext = "\x58\x2b\x1d\x73\x9a\x9c\x63\x18"
- "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb"
- "\xc9\x9d\x79\x51\x34\x39\x4f\x07"
- "\xa2\x7c\x21\x04\x91\x3b\x79\x79"
- "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0"
- "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2"
- "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95"
- "\xb6\x08\x1d\x31\xaf\xf4\x17\x46"
- "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15"
- "\x0c\x85\x2f\x62\xe5\xf4\x35\x96"
- "\xb1\x9b\x5d\x00\x10\xe9\x70\x12"
- "\x3a\x87\x7f\x67\xf1\x81\x7a\x05"
- "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e"
- "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41",
- .ctext = "\xd4\x9a\x04\x54\x05\xd2\xe6\x3f"
- "\xb0\xa4\x36\x5e\x1e\x9c\x35\xb0"
- "\xc0\x89\xbd\x1c\xaa\x45\xa6\xc8"
- "\x16\x68\x4a\x06\x93\x67\x88\xd7"
- "\x72\x6e\x48\x0a\x17\xa3\x52\x8b"
- "\x96\x5f\x41\xf6\x17\x64\x55\x8b"
- "\xac\xce\xf6\x8c\xce\xd2\xd4\xd4"
- "\x8d\x92\x32\xe0\x0d\xb4\xf7\x4a"
- "\x90\xaf\x7b\x85\x21\x46\x2e\xa6"
- "\x9e\xac\x0d\x22\xf2\x26\xf6\xd3"
- "\x27\xcd\x59\xa0\xe2\xbb\x22\xcd"
- "\x35\xb6\x28\x45\x0a\x46\xb0\x3a"
- "\xac\x3e\xd3\x5b\xc6\x54\xa2\xa3"
- "\x6d\xbb\xb3\xcd\xc5\x64\x62\x92",
- .len = 112,
- }, {
- .key = "\x9c\x14\x44\x5a\xd5\x1c\x50\x08"
- "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e"
- "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3",
- .klen = 24,
- .iv = "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b"
- "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe",
- .ptext = "\x2f\x7e\x1c\x10\x81\x36\x2d\x79"
- "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c"
- "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda"
- "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d"
- "\x70\x86\xa5\x92\x9f\xee\xcd\x3f"
- "\x6a\x55\x84\x98\x28\x03\x02\xc2"
- "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8"
- "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c"
- "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17"
- "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c"
- "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad"
- "\x8c\xaf\x36\x3d\xff\x29\x8b\x33"
- "\x87\x96\x77\x1a\x10\x81\x63\x8a"
- "\x63\xde\x88\xa9\x9d\xa9\x01\xf2"
- "\xdf\xc9\x25\x35\x48\x3a\x15\xdf"
- "\x20\x6b\x91\x7c\x56\xe5\x10\x7a",
- .ctext = "\xbc\x57\x2a\x88\x0a\xd0\x06\x4f"
- "\xdb\x7b\x03\x9f\x97\x1a\x20\xfe"
- "\x15\x91\xb4\xed\x5d\x78\x89\x2a"
- "\x67\x6b\x9c\x47\x36\xc2\x80\x0e"
- "\x03\x8d\x6f\xfc\x94\xc7\xc5\xc2"
- "\xeb\x43\x74\x5d\xfe\xc4\x5a\xa1"
- "\x80\x51\x8a\x63\xd1\x27\x1b\x0a"
- "\x88\x2c\xc4\x7f\x1a\xa3\x28\xe5"
- "\xfd\xd0\x8a\xd4\x36\xa6\x19\xd5"
- "\xff\x41\x7a\x8b\x6e\x9a\x97\x14"
- "\x2a\xc8\xd0\xb8\xa3\x8e\x64\x32"
- "\xb7\x2d\x76\x9b\x3b\xe2\x3f\x91"
- "\xb4\x64\xbf\x59\x67\x14\xc3\xf5"
- "\xa8\x92\x4b\x85\xdf\x80\xcb\xb5"
- "\xc7\x80\xf9\x4a\xbc\xed\x67\x5a"
- "\x0b\x58\x65\x1f\xc9\x6e\x9b\x0a",
- .len = 128,
- }, {
- .key = "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f"
- "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd"
- "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8",
- .klen = 24,
- .iv = "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a"
- "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e",
- .ptext = "\xc0\x3b\x12\x28\xca\x26\x7b\xb3"
- "\x14\xc1\x7f\x66\xff\x3b\xa4\x80"
- "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a"
- "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf"
- "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff"
- "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd"
- "\xcd\x56\x02\x95\xc9\x54\x6e\x62"
- "\x6a\x97\x75\x1a\x21\x16\x46\xfb"
- "\xc2\xab\x62\x54\xef\xba\xae\x46"
- "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9"
- "\x05\x26\x23\x81\x19\x27\xad\x7b"
- "\x9c\x8b\xfb\x65\xa4\x61\xee\x69"
- "\x44\xbf\x59\xde\x03\x61\x11\x12"
- "\x8d\x94\x48\x47\xa9\x52\x16\xfb"
- "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c"
- "\xb6\x09\x21\x12\x42\x98\x13\xa1"
- "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea"
- "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c",
- .ctext = "\xd7\xb4\xfc\xcc\x1f\xf7\xfc\x7d"
- "\x69\xfa\xcb\x01\x60\xf3\x5a\x14"
- "\xfe\x8c\x4e\xfa\x09\xb5\x0d\xda"
- "\xff\xdd\xba\xdf\xa3\x6b\x3a\x87"
- "\x21\xbb\xf8\x62\x14\x22\xdd\x9b"
- "\x92\x23\xaa\xd7\xcc\xb2\x15\xd0"
- "\xbd\x81\x95\x24\xc2\xc6\x53\x5b"
- "\xf7\x3c\xa0\xf7\x36\xbc\xbf\xf3"
- "\xfc\x1c\x6e\xe0\x71\x8d\xa1\x3d"
- "\x8e\x1a\xc5\xba\xd5\x68\xd4\x7a"
- "\xe0\x4f\x0a\x14\x89\x0b\xa6\x2f"
- "\x18\xc5\x38\x76\xf1\xe7\x5c\xae"
- "\x7a\xbb\x27\x1c\xf0\x7c\x6c\x14"
- "\x07\xb7\x49\x6e\x29\x04\x38\x31"
- "\x91\xe8\x1d\x0f\xfc\x3b\xb8\x20"
- "\x58\x64\x11\xa1\xf5\xba\xa3\x62"
- "\x92\xcf\x44\x63\x2c\xe8\x10\xb5"
- "\xf0\x97\x86\xcb\x5f\xc1\x80\x7a",
- .len = 144,
- }, {
- .key = "\x66\xb8\x4d\x60\x67\x82\xcc\x8d"
- "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c"
- "\x54\x84\x2a\x06\xb5\xd1\x34\x57",
- .klen = 24,
- .iv = "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33"
- "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97",
- .ptext = "\x3e\xc6\xec\xaf\x74\xe8\x72\x91"
- "\xb2\xc6\x56\xb3\x23\x29\x43\xe0"
- "\xfb\xcc\x21\x38\x64\x78\x9e\x78"
- "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01"
- "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58"
- "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d"
- "\x90\xb7\x60\x78\xa8\x9f\x52\xe3"
- "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53"
- "\x42\x0e\x15\x31\xf6\x48\xa3\x0a"
- "\x20\xf0\x79\x67\xb1\x83\x26\x66"
- "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd"
- "\x45\x87\xa4\x14\x1b\xef\xe7\x16"
- "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e"
- "\x01\x68\xc1\x7d\xa2\x15\x49\x74"
- "\x53\x82\xc2\x10\xa8\x45\x73\x4d"
- "\x41\xcc\x24\xa3\x42\xff\x30\xd1"
- "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c"
- "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe"
- "\x52\xf1\x34\x78\x34\x53\x30\x5b"
- "\x43\x43\x51\x6a\x02\x81\x64\x0c",
- .ctext = "\x71\xf6\x96\x02\x07\x71\x1a\x08"
- "\x7c\xfe\x33\xc4\xc9\xbe\xe2\xed"
- "\xd0\xcc\x5d\x27\x75\xb4\x5d\x8d"
- "\x24\x03\xe4\x96\x31\x94\x0e\x38"
- "\x14\x4f\xad\x16\x58\x0d\x73\xdc"
- "\xbe\x5b\xcb\x38\xeb\x4d\xbc\x9a"
- "\x44\x69\x7a\x12\x91\x14\x52\xfa"
- "\xd2\xa2\xc5\x66\xd7\xaf\x4d\xb9"
- "\xb1\x58\x24\x10\xde\x6a\xee\x7e"
- "\x45\xf3\x76\xea\x47\x8a\xe6\x96"
- "\x41\xf2\x96\x2d\x3c\xec\xcf\xc6"
- "\x1d\xf4\x26\xc0\xea\x90\x27\x6e"
- "\x87\xef\xb5\x39\x38\xdb\xad\xbf"
- "\x57\x9a\x1d\xbc\x1d\xe5\x16\x91"
- "\x41\x45\xbe\x67\x6c\x42\x0f\xad"
- "\xcf\xfb\xcd\xf1\x4c\xd8\x73\xe7"
- "\x24\x3b\xd7\x03\xeb\xd1\xb1\x1b"
- "\x7d\xc9\x3d\x34\xd7\xb8\x69\x03"
- "\x76\x95\x32\x26\xed\x88\x76\x89"
- "\x13\xc6\xc8\xa6\x60\xf9\x73\x4d",
- .len = 160,
- }, {
- .key = "\x82\x8e\x9e\x06\x7b\xc2\xe9\xb3"
- "\x06\xa3\xfa\x99\x42\x67\x87\xac"
- "\x21\xc7\xb0\x98\x6c\xf8\x26\x57"
- "\x08\xdd\x92\x02\x77\x7b\x35\xe7",
- .klen = 32,
- .iv = "\xa1\xad\xcb\xdd\xd5\x19\xb6\xd4"
- "\x0b\x62\x58\xb0\x6c\xa0\xc1\x58",
- .ptext = "\x14\x0d\x8a\x09\x16\x00\x00\xf1"
- "\xc0\x20\x86\xf9\x21\xd1\x34\xe2",
- .ctext = "\x05\xe3\x34\xaf\x6c\x83\x14\x8b"
- "\x9d\x1c\xd6\x87\x74\x91\xdf\x17",
- .len = 16,
- }, {
- .key = "\xc9\xf3\xc4\x93\xd0\xcc\xaf\xb1"
- "\x1a\x42\x93\x71\xd8\x4e\xd8\xaa"
- "\x52\xad\x93\x2f\xe5\xd9\xaa\x5b"
- "\x47\x37\x3a\xed\x13\x92\x35\x16",
- .klen = 32,
- .iv = "\x81\xc8\x50\xd1\x74\xc3\x1c\x73"
- "\xbb\xab\x72\x83\x90\x5a\x15\xcb",
- .ptext = "\x65\x11\x93\xaf\xe1\x69\x6c\xbe"
- "\x25\x8c\x76\x87\x53\xa4\x80\xae"
- "\x51\x94\x36\x3f\xca\xe7\x45\x41"
- "\x76\x05\xbf\x8f\x9c\xad\xc0\xe3",
- .ctext = "\x6B\x00\x6E\x49\x7A\x6D\xE3\x04"
- "\x4E\xF7\x9F\x8A\x1F\x14\xBD\xB1"
- "\xD3\x5D\xA4\x30\x26\x85\x85\xEF"
- "\x12\xBC\xC7\xA1\x65\x82\xA7\x74",
- .len = 32,
- }, {
- .key = "\xd5\x9f\x52\x34\x12\x99\x8e\x42"
- "\xe0\x85\x04\x6f\xeb\xf1\x5d\xd0"
- "\xc1\xbf\x3f\x84\xd9\x1e\x71\x44"
- "\xd4\xb9\x40\x3c\x02\x2e\x21\x19",
- .klen = 32,
- .iv = "\x28\xc1\x97\x64\x81\x52\x57\x0e"
- "\x02\x8c\xab\x4c\xe2\x60\x14\xa5",
- .ptext = "\x5a\xb1\x33\x48\xaa\x51\xe9\xa4"
- "\x5c\x2d\xbe\x33\xcc\xc4\x7f\x96"
- "\xe8\xde\x2b\xe7\x35\x7a\x11\x4b"
- "\x13\x08\x32\xc6\x41\xd8\xec\x54"
- "\xa3\xd3\xda\x35\x43\x69\xf6\x88"
- "\x97\xca\x00\x1b\x02\x59\x24\x82",
- .ctext = "\x03\xaf\x76\xbd\x5e\x5b\xca\xc0"
- "\xae\x44\xa2\x2f\xc2\x76\x2f\x50"
- "\x6a\x73\x28\xf2\xba\xe8\xb2\xb8"
- "\x43\x61\x41\x92\xff\xac\xcb\xa6"
- "\x84\x31\xe3\x34\xd0\x37\x81\xab"
- "\x2b\x0e\x97\x3c\x4a\x2d\xa4\x83",
- .len = 48,
- }, {
- .key = "\x9c\x5d\xd7\x66\x36\xfa\x02\x20"
- "\x99\x61\x62\x86\x0f\x43\x2e\x05"
- "\x25\x8b\xfb\xf1\xae\x4c\xde\x18"
- "\x0b\xf8\xd0\x9d\xaa\xd4\x56\x04",
- .klen = 32,
- .iv = "\xcd\xa8\x61\x89\x8d\xbb\x72\xb6"
- "\x1e\xfe\x03\x34\x54\x88\x23\xe2",
- .ptext = "\x66\x42\x60\x24\xf3\xe4\xe9\x7e"
- "\x42\x20\xf4\x61\xce\x1c\x5e\x44"
- "\x02\x26\x91\xf7\x41\xa4\xab\x34"
- "\x29\x49\xdd\x78\x19\x8f\x10\x10"
- "\xf0\x61\xcf\x77\x18\x17\x61\xdf"
- "\xc4\xa8\x35\x0e\x75\x1b\x84\x6b"
- "\xc3\x3f\x31\x59\x5a\x9c\xf4\xc3"
- "\x43\xa9\xb7\xf8\x65\x40\x40\xba",
- .ctext = "\xb6\x41\x55\x8f\xeb\x16\x1e\x4c"
- "\x81\xa0\x85\x6c\xf0\x07\xa5\x2a"
- "\x12\x0f\x1d\xb2\xaa\xba\x85\x0f"
- "\xa6\x27\x1a\x91\xa6\xc5\x8c\x2a"
- "\xde\x8d\x3a\xa9\x8b\xcf\x24\xf1"
- "\x82\x51\x6b\xc8\x01\xd7\x7b\x89"
- "\x6c\xfc\xb1\x96\x6c\xa2\xd7\x1f"
- "\x4b\x7a\xd9\x8d\x34\xaa\xa0\x8a",
- .len = 64,
- }, {
- .key = "\x4b\x4e\x11\x91\x27\xcf\x8c\x66"
- "\x17\xfa\x5b\x4c\xa8\xb8\x0f\xa1"
- "\x99\x5b\x07\x56\xe1\x8d\x94\x8b"
- "\xf2\x86\x5a\x5f\x40\x83\xfa\x06",
- .klen = 32,
- .iv = "\xfd\x73\xee\x1c\x27\xf3\xb4\x38"
- "\xc5\x7c\x2e\xc5\x6e\xdb\x49\x0d",
- .ptext = "\x0a\xe2\xdd\x97\xdd\x5e\xd4\xb3"
- "\xc1\x49\x8f\x53\xb2\x40\x85\x1c"
- "\x90\x37\x2d\xbd\x21\x6b\x1f\x80"
- "\x56\x98\x76\x1e\xcf\x6c\x78\xd8"
- "\xa0\x3c\x79\xc3\x56\xf7\xfc\x64"
- "\x35\x58\x1c\x7c\xc4\x5f\x2a\x25"
- "\x8c\x01\x98\x1e\x1c\x1f\x15\x64"
- "\x50\xb5\xfa\x02\xd3\x54\xe5\x29"
- "\xe3\xd2\xa3\x83\x54\x40\x54\xc5"
- "\xd8\x1c\xc9\x84\x7d\xc8\x31\x49",
- .ctext = "\x53\x2a\xa8\xa0\x15\xaf\x2f\xc4"
- "\x7d\x31\xb4\x61\x80\x5f\xd1\xb6"
- "\xa4\x29\x40\x72\x1b\xb2\x96\xb7"
- "\x4d\x5e\x5b\x53\x44\xa4\xf1\xe9"
- "\xf0\x27\x2f\x26\x84\x66\x13\xa4"
- "\xb2\x19\x55\xb1\x18\xf3\x69\xfd"
- "\xb0\x2f\x08\x3f\xa5\x41\xe2\x34"
- "\x5e\x63\x57\x0e\xef\x17\x78\xbc"
- "\xc3\x65\x7c\xbe\x6b\xa3\xa3\xef"
- "\x58\x05\x30\x5a\x08\xbd\xf7\x0e",
- .len = 80,
- }, {
- .key = "\x77\x3b\xf5\xe7\x20\xf7\xe0\x0c"
- "\x3d\x3a\x83\x17\x83\x79\xd8\x29"
- "\x5a\x0a\x25\x7f\xe0\x21\x23\xff"
- "\x31\xfd\x60\x10\xe6\x63\xe2\xaf",
- .klen = 32,
- .iv = "\xdb\x4c\x0d\xc0\x36\xdb\xc7\xa1"
- "\xa4\x91\xd9\x05\xe6\xc4\x98\x00",
- .ptext = "\x8d\x4d\xc6\x5e\x01\x82\xb3\x39"
- "\xc8\x64\xa7\xcb\x05\x19\x84\x80"
- "\x3f\x9c\xa8\x4f\x64\xb3\x11\x4b"
- "\x0e\x21\xc4\x75\x04\x1d\x6f\xd5"
- "\x04\x04\x4d\xc9\xc0\x4b\x4a\x9c"
- "\x26\xb7\x68\x5a\xe4\xd0\x61\xe3"
- "\x2c\x93\x8e\x3f\xb4\x67\x07\x31"
- "\x02\x52\x0c\x0f\xe6\x6d\xa3\xd0"
- "\x48\x95\x83\x67\x23\x64\x31\x50"
- "\xd2\x5f\x69\x68\x8b\x71\xbf\x01"
- "\x29\x99\x86\x36\x2e\xdf\xf1\x7c"
- "\x08\x8c\x78\x7a\x93\x9a\x7d\x1b",
- .ctext = "\x92\x90\x48\x2f\x3a\x6b\x68\x43"
- "\x28\x9b\x7d\x1e\x46\x28\xd8\x58"
- "\x0f\x47\x8b\xb5\x83\x35\x35\x3e"
- "\xdf\x59\x3d\xb3\x47\xfc\xfc\x52"
- "\x86\xeb\xb3\x58\x54\xd5\x0a\xb4"
- "\xad\xbd\x5c\x09\xfc\x08\xc2\x01"
- "\x5e\x9b\x30\x11\xc4\x40\x2e\x32"
- "\x9c\xa0\xf1\xfd\xae\xd4\x75\x5e"
- "\x52\xd9\x19\x4d\xc1\xd4\xb6\x19"
- "\x88\xfb\x29\x17\x15\xbb\x60\xd6"
- "\x5a\xe9\x82\x89\xaf\x30\x4e\xd4"
- "\x47\xde\x86\x88\x95\x4c\x13\x59",
- .len = 96,
- }, {
- .key = "\xe0\x6a\x30\xe1\x35\xb5\xb0\x7c"
- "\x54\xc5\x73\x9b\x00\xe5\xe7\x02"
- "\xbe\x16\x59\xdc\xd9\x03\x17\x53"
- "\xa8\x37\xd1\x5f\x13\x8e\x45\xdb",
- .klen = 32,
- .iv = "\x54\xe9\x1c\xde\xfb\x26\x0e\x48"
- "\x35\x50\x4d\x9b\x4d\x12\x21\x0d",
- .ptext = "\x73\x72\xcf\xdb\xbd\xbc\xc0\xdf"
- "\x6b\xbb\xdf\x65\x6f\x2f\x43\x3b"
- "\x2d\x7c\x0e\x07\x7f\xa0\x95\xdd"
- "\xfc\x67\xc1\x11\x7a\xe2\xb5\x4a"
- "\xd1\x15\xb0\xd8\xe2\xf0\x35\x48"
- "\xd8\x81\x6a\x35\xae\x67\xbf\x61"
- "\xf2\x8a\xcf\x04\xc8\x09\x8b\x63"
- "\x31\x74\x95\xa5\x8d\x3c\xea\xe2"
- "\x5f\x67\xc4\x7e\x51\x88\xbf\xb5"
- "\x78\xef\x3a\x76\xd8\x1d\x00\x75"
- "\x2b\x7b\x28\x7c\xde\x4b\x39\x01"
- "\x5d\xde\x92\xfe\x90\x07\x09\xfd"
- "\xa5\xd1\xd3\x72\x11\x6d\xa4\x4e"
- "\xd1\x6e\x16\xd1\xf6\x39\x4f\xa0",
- .ctext = "\x3b\xc5\xee\xfc\x05\xaf\xa6\xb7"
- "\xfe\x12\x24\x79\x31\xad\x32\xb5"
- "\x64\x5a\x17\xc9\xbf\x1f\xdc\xce"
- "\x8d\x73\x00\x71\xd9\xfb\xd2\xe6"
- "\xc3\x54\xb4\xf3\x36\xe8\x89\x12"
- "\x5a\x32\x0b\xa6\xec\x5f\x89\xe7"
- "\xe8\x34\x92\xa6\xce\xde\x8f\xf9"
- "\x4f\xda\xed\x61\x8e\xb2\x81\xbe"
- "\xf2\x15\x85\xbe\xa1\x5f\x19\x85"
- "\x71\x7e\xda\x46\x59\xed\x5d\xb0"
- "\xd9\x68\x97\xe0\xcd\x1d\x1b\x65"
- "\xf5\xc9\x44\xe2\xb4\x42\x17\x7c"
- "\xe7\x58\xf3\x2f\xcf\xbe\x5c\x66"
- "\xaa\xd3\x61\xa5\x9a\x79\xbb\xa0",
- .len = 112,
- }, {
- .key = "\x60\xb6\xde\x17\xca\x4c\xe7\xe0"
- "\x07\x0d\x80\xc5\x8a\x2d\x5a\xc2"
- "\x2c\xb9\xa4\x5f\x2a\x85\x2c\x3d"
- "\x6d\x67\xc8\xee\x0f\xa2\xf4\x09",
- .klen = 32,
- .iv = "\x1a\xa5\xbc\x7e\x93\xf6\xdd\x28"
- "\xb7\x69\x27\xa1\x84\x95\x25\x5a",
- .ptext = "\x7b\x88\x00\xeb\xa5\xba\xa1\xa7"
- "\xd4\x40\x16\x74\x2b\x42\x37\xda"
- "\xe0\xaf\x89\x59\x41\x2f\x62\x00"
- "\xf5\x5a\x4e\x3b\x85\x27\xb2\xed"
- "\x1b\xa7\xaf\xbe\x89\xf3\x49\xb7"
- "\x8c\x63\xc9\x0c\x52\x00\x5f\x38"
- "\x3b\x3c\x0c\x4f\xdd\xe1\xbf\x90"
- "\x4a\x48\xbf\x3a\x95\xcb\x48\xa2"
- "\x92\x7c\x79\x81\xde\x18\x6e\x92"
- "\x1f\x36\xa9\x5d\x8d\xc4\xb6\x4d"
- "\xb2\xb4\x0e\x09\x6d\xf3\x3d\x01"
- "\x3d\x9b\x40\x47\xbc\x69\x31\xa1"
- "\x6a\x71\x26\xdc\xac\x10\x56\x63"
- "\x15\x23\x7d\x10\xe3\x76\x82\x41"
- "\xcd\x80\x57\x2f\xfc\x4d\x22\x7b"
- "\x57\xbb\x9a\x0a\x03\xe9\xb3\x13",
- .ctext = "\x37\x0d\x47\x21\xbc\x28\x0b\xf7"
- "\x85\x5f\x60\x57\xf2\x7f\x92\x20"
- "\x53\x1a\xbf\xd1\x7f\x8c\x39\x29"
- "\x0e\x18\xab\x0c\x00\x92\xd3\x68"
- "\x60\x56\x3b\x00\xef\xf8\x02\xfa"
- "\xcb\x92\x1a\x91\xe1\xf0\x4f\x8a"
- "\xc6\x4f\x65\x16\x71\x8b\x5d\xd5"
- "\x79\xa9\x6d\x68\x1b\x59\xe7\x2a"
- "\x1c\xd0\x5d\xfb\x06\x3b\x15\x72"
- "\xa8\xd1\x59\x9a\xb2\x6c\xf2\xd5"
- "\x19\xef\xde\x03\x4c\x75\x65\x38"
- "\x5b\xda\xc9\xf0\x44\x99\xb2\x6e"
- "\x78\xfb\x85\x5a\x92\x91\x1a\x0a"
- "\x13\x0c\x1b\x1c\xbe\xbe\x46\x6e"
- "\x73\xff\xc2\x6e\xb9\x06\x16\x7e"
- "\xf6\xc0\x01\x30\x34\x56\x46\x55",
- .len = 128,
- }, {
- .key = "\x2a\xed\x7d\x76\xfc\xc5\x49\x50"
- "\xf4\x90\x0f\xcc\x5d\xff\x0c\x3c"
- "\x14\x06\xaf\x68\x8f\xd7\xb6\x25"
- "\x1e\x10\x95\x2a\x71\x33\x17\x20",
- .klen = 32,
- .iv = "\x5b\x58\x47\xf8\xd5\x1e\x91\x81"
- "\x46\xe7\x25\x3a\x02\x45\x9c\x65",
- .ptext = "\x10\xaf\xde\x5c\x30\x79\x43\x28"
- "\x1c\x03\xf8\x50\x0f\x30\xa5\xef"
- "\x84\x19\x4c\x09\x40\x03\x75\x1f"
- "\x92\x8f\x88\x01\xda\x31\x7a\xe4"
- "\x48\xe3\xab\xb4\xe6\x1b\x0f\xac"
- "\xd9\xfa\x8d\x23\xe4\xc6\xa4\xa9"
- "\x2d\x9a\x54\x52\x44\x5c\x3c\x52"
- "\x61\xf0\x00\xca\xed\xab\xed\xe2"
- "\x44\x0b\xe0\x18\xba\xa5\x63\xd8"
- "\xdc\x5e\x1a\x4c\xf8\xde\x5e\x75"
- "\xdf\x42\x27\x7b\xe9\x11\x2f\x41"
- "\x3a\x72\x54\x3d\x44\x9c\x3e\x87"
- "\x8d\x8d\x43\x2f\xb2\xff\x87\xd4"
- "\xad\x98\x68\x72\x53\x61\x19\x7c"
- "\x20\x79\x8c\x2b\x37\x0b\x96\x15"
- "\xa5\x7d\x4e\x01\xe6\xea\xb6\xfa"
- "\xaa\xd3\x9d\xa2\xd9\x11\xc3\xc9"
- "\xd4\x0e\x3f\x3e\xfe\x35\x1e\xe5",
- .ctext = "\xb0\x2b\x75\x5f\x33\x1b\x05\x49"
- "\x06\xf1\x43\x91\xc2\x85\xfa\xac"
- "\x74\xd5\x8c\xc9\x47\x6e\x5a\xf6"
- "\x69\x33\x4c\xcb\x2f\x36\x4b\x41"
- "\xec\x05\x69\xab\x7f\x42\xc9\xd2"
- "\x26\x64\x51\x9e\x3d\x65\x35\xf0"
- "\x8d\x5e\x8a\xb1\xee\xdf\x1a\x98"
- "\x36\xd2\x37\x49\x5b\xe2\x57\x00"
- "\x1d\x72\x7e\xe8\x38\x11\x83\x15"
- "\xc7\x4e\x65\xa4\x2c\x9e\x6a\x3e"
- "\xb4\x78\x3f\xe9\x91\x5d\x06\xa9"
- "\xf1\xfc\x6b\x08\xe5\x2b\x2a\x99"
- "\x65\xa7\x2e\x47\xf9\xc2\xb1\x8b"
- "\x88\x2f\xb7\x62\x84\x63\x94\x00"
- "\x49\xa7\xd0\x2b\x54\x7a\x69\xb3"
- "\x04\x66\xfc\x97\x40\x92\xd1\xb8"
- "\xb4\x2a\x9e\xdb\x31\xcd\x48\x84"
- "\x29\x3b\x02\xac\xb8\x54\x95\xb4",
- .len = 144,
- }, {
- .key = "\x7b\xa7\x4d\x0a\x37\x30\xb9\xf5"
- "\x2a\x79\xb4\xbf\xdb\x7f\x9b\x64"
- "\x23\x43\xb5\x18\x34\xc4\x5f\xdf"
- "\xd9\x2a\x66\x58\x00\x44\xb5\xd9",
- .klen = 32,
- .iv = "\x75\x34\x30\xc1\xf0\x69\xdf\x0a"
- "\x52\xce\x4f\x1e\x2c\x41\x35\xec",
- .ptext = "\x81\x47\x55\x3a\xcd\xfe\xa2\x3d"
- "\x45\x53\xa7\x67\x61\x74\x25\x80"
- "\x98\x89\xfe\xf8\x6a\x9f\x51\x7c"
- "\xa4\xe4\xe7\xc7\xe0\x1a\xce\xbb"
- "\x4b\x46\x43\xb0\xab\xa8\xd6\x0c"
- "\xa0\xf0\xc8\x13\x29\xaf\xb8\x01"
- "\x6b\x0c\x7e\x56\xae\xb8\x58\x72"
- "\xa9\x24\x44\x61\xff\xf1\xac\xf8"
- "\x09\xa8\x48\x21\xd6\xab\x41\x73"
- "\x70\x6b\x92\x06\x61\xdc\xb4\x85"
- "\x76\x26\x7a\x84\xc3\x9e\x3a\x14"
- "\xe7\xf4\x2d\x95\x92\xad\x18\xcc"
- "\x44\xd4\x2c\x36\x57\xed\x2b\x9b"
- "\x3f\x2b\xcd\xe5\x11\xe3\x62\x33"
- "\x42\x3f\xb8\x2a\xb1\x37\x3f\x8b"
- "\xe8\xbd\x6b\x0b\x9f\x38\x5a\x5f"
- "\x82\x34\xb7\x96\x35\x58\xde\xab"
- "\x94\x98\x41\x5b\x3f\xac\x0a\x34"
- "\x56\xc0\x02\xef\x81\x6d\xb1\xff"
- "\x34\xe8\xc7\x6a\x31\x79\xba\xd8",
- .ctext = "\x4e\x00\x7c\x52\x45\x76\xf9\x3d"
- "\x1a\xd1\x72\xbc\xb9\x0f\xa9\xfb"
- "\x0a\xf5\xe8\x11\x66\x8b\xad\x68"
- "\x5a\x2e\xbf\x09\x33\x9d\xb6\x67"
- "\xe5\xcb\x0a\xe0\xac\xed\x73\x4b"
- "\xbb\x15\xde\xd8\xab\x33\x28\x5f"
- "\x96\x07\x3c\x28\x79\x88\x84\xc7"
- "\x13\xf7\x0d\xa5\x97\x3b\xd9\xb1"
- "\xf2\x65\xb0\xac\xbb\x8a\x97\xd1"
- "\x70\x3a\x91\x65\xc8\x39\x04\xe7"
- "\x1a\x9c\x80\x65\x2b\x69\x4b\xdc"
- "\xdc\xc7\xf1\x31\xda\xab\xb4\xd7"
- "\x46\x2e\x1d\xc9\x2e\xe9\x46\xec"
- "\xa4\xa1\x91\x6b\x4a\x09\xf9\x39"
- "\x7b\x7d\x6d\xf5\x43\x7f\xcc\x74"
- "\x96\xfa\x48\xd0\xe1\x74\x24\xd0"
- "\x19\x22\x24\x84\x2b\x12\x10\x46"
- "\x90\xbd\xa9\x93\xb7\xf7\x36\xd4"
- "\x48\xc7\x32\x83\x8c\xa9\xcd\x5a"
- "\x2f\x05\x33\xc1\x5b\x50\x70\xc4",
- .len = 160,
- }
-};
-
static const struct aead_testvec aria_gcm_tv_template[] = {
{
.key = "\xe9\x1e\x5e\x75\xda\x65\x55\x4a"
@@ -34730,81 +33364,6 @@ static const struct comp_testvec deflate_decomp_tv_template[] = {
},
};
-static const struct comp_testvec zlib_deflate_comp_tv_template[] = {
- {
- .inlen = 70,
- .outlen = 44,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\x78\x5e\xf3\xca\xcf\xcc\x53\x28"
- "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc"
- "\x4b\x51\x28\xce\x48\x2c\x4a\x55"
- "\x28\xc9\x48\x55\x28\xce\x4f\x2b"
- "\x29\x07\x71\xbc\x08\x2b\x01\x00"
- "\x7c\x65\x19\x3d",
- }, {
- .inlen = 191,
- .outlen = 129,
- .input = "This document describes a compression method based on the DEFLATE"
- "compression algorithm. This document defines the application of "
- "the DEFLATE algorithm to the IP Payload Compression Protocol.",
- .output = "\x78\x5e\x5d\xce\x41\x0a\xc3\x30"
- "\x0c\x04\xc0\xaf\xec\x0b\xf2\x87"
- "\xd2\xa6\x50\xe8\xc1\x07\x7f\x40"
- "\xb1\x95\x5a\x60\x5b\xc6\x56\x0f"
- "\xfd\x7d\x93\x1e\x42\xe8\x51\xec"
- "\xee\x20\x9f\x64\x20\x6a\x78\x17"
- "\xae\x86\xc8\x23\x74\x59\x78\x80"
- "\x10\xb4\xb4\xce\x63\x88\x56\x14"
- "\xb6\xa4\x11\x0b\x0d\x8e\xd8\x6e"
- "\x4b\x8c\xdb\x7c\x7f\x5e\xfc\x7c"
- "\xae\x51\x7e\x69\x17\x4b\x65\x02"
- "\xfc\x1f\xbc\x4a\xdd\xd8\x7d\x48"
- "\xad\x65\x09\x64\x3b\xac\xeb\xd9"
- "\xc2\x01\xc0\xf4\x17\x3c\x1c\x1c"
- "\x7d\xb2\x52\xc4\xf5\xf4\x8f\xeb"
- "\x6a\x1a\x34\x4f\x5f\x2e\x32\x45"
- "\x4e",
- },
-};
-
-static const struct comp_testvec zlib_deflate_decomp_tv_template[] = {
- {
- .inlen = 128,
- .outlen = 191,
- .input = "\x78\x9c\x5d\x8d\x31\x0e\xc2\x30"
- "\x10\x04\xbf\xb2\x2f\xc8\x1f\x10"
- "\x04\x09\x89\xc2\x85\x3f\x70\xb1"
- "\x2f\xf8\x24\xdb\x67\xd9\x47\xc1"
- "\xef\x49\x68\x12\x51\xae\x76\x67"
- "\xd6\x27\x19\x88\x1a\xde\x85\xab"
- "\x21\xf2\x08\x5d\x16\x1e\x20\x04"
- "\x2d\xad\xf3\x18\xa2\x15\x85\x2d"
- "\x69\xc4\x42\x83\x23\xb6\x6c\x89"
- "\x71\x9b\xef\xcf\x8b\x9f\xcf\x33"
- "\xca\x2f\xed\x62\xa9\x4c\x80\xff"
- "\x13\xaf\x52\x37\xed\x0e\x52\x6b"
- "\x59\x02\xd9\x4e\xe8\x7a\x76\x1d"
- "\x02\x98\xfe\x8a\x87\x83\xa3\x4f"
- "\x56\x8a\xb8\x9e\x8e\x5c\x57\xd3"
- "\xa0\x79\xfa\x02\x2e\x32\x45\x4e",
- .output = "This document describes a compression method based on the DEFLATE"
- "compression algorithm. This document defines the application of "
- "the DEFLATE algorithm to the IP Payload Compression Protocol.",
- }, {
- .inlen = 44,
- .outlen = 70,
- .input = "\x78\x9c\xf3\xca\xcf\xcc\x53\x28"
- "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc"
- "\x4b\x51\x28\xce\x48\x2c\x4a\x55"
- "\x28\xc9\x48\x55\x28\xce\x4f\x2b"
- "\x29\x07\x71\xbc\x08\x2b\x01\x00"
- "\x7c\x65\x19\x3d",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
- },
-};
-
/*
* LZO test vectors (null-terminated strings).
*/
@@ -37692,177 +36251,6 @@ static const struct cipher_testvec aes_xctr_tv_template[] = {
/*
* Test vectors generated using https://github.com/google/hctr2
- *
- * To ensure compatibility with RFC 8452, some tests were sourced from
- * https://datatracker.ietf.org/doc/html/rfc8452
- */
-static const struct hash_testvec polyval_tv_template[] = {
- { // From RFC 8452
- .key = "\x31\x07\x28\xd9\x91\x1f\x1f\x38"
- "\x37\xb2\x43\x16\xc3\xfa\xb9\xa0",
- .plaintext = "\x65\x78\x61\x6d\x70\x6c\x65\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x48\x65\x6c\x6c\x6f\x20\x77\x6f"
- "\x72\x6c\x64\x00\x00\x00\x00\x00"
- "\x38\x00\x00\x00\x00\x00\x00\x00"
- "\x58\x00\x00\x00\x00\x00\x00\x00",
- .digest = "\xad\x7f\xcf\x0b\x51\x69\x85\x16"
- "\x62\x67\x2f\x3c\x5f\x95\x13\x8f",
- .psize = 48,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x40\x00\x00\x00\x00\x00\x00\x00",
- .digest = "\xeb\x93\xb7\x74\x09\x62\xc5\xe4"
- "\x9d\x2a\x90\xa7\xdc\x5c\xec\x74",
- .psize = 32,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x80\x01\x00\x00\x00\x00\x00\x00",
- .digest = "\x81\x38\x87\x46\xbc\x22\xd2\x6b"
- "\x2a\xbc\x3d\xcb\x15\x75\x42\x22",
- .psize = 64,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x02\x00\x00\x00\x00\x00\x00",
- .digest = "\x1e\x39\xb6\xd3\x34\x4d\x34\x8f"
- "\x60\x44\xf8\x99\x35\xd1\xcf\x78",
- .psize = 80,
- .ksize = 16,
- },
- { // From RFC 8452
- .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a"
- "\xc5\xdb\xc6\x98\x7a\xda\x73\x77",
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x05\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x08\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x02\x00\x00\x00\x00\x00\x00",
- .digest = "\xff\xcd\x05\xd5\x77\x0f\x34\xad"
- "\x92\x67\xf0\xa5\x99\x94\xb1\x5a",
- .psize = 96,
- .ksize = 16,
- },
- { // Random ( 1)
- .key = "\x90\xcc\xac\xee\xba\xd7\xd4\x68"
- "\x98\xa6\x79\x70\xdf\x66\x15\x6c",
- .plaintext = "",
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 0,
- .ksize = 16,
- },
- { // Random ( 1)
- .key = "\xc1\x45\x71\xf0\x30\x07\x94\xe7"
- "\x3a\xdd\xe4\xc6\x19\x2d\x02\xa2",
- .plaintext = "\xc1\x5d\x47\xc7\x4c\x7c\x5e\x07"
- "\x85\x14\x8f\x79\xcc\x73\x83\xf7"
- "\x35\xb8\xcb\x73\x61\xf0\x53\x31"
- "\xbf\x84\xde\xb6\xde\xaf\xb0\xb8"
- "\xb7\xd9\x11\x91\x89\xfd\x1e\x4c"
- "\x84\x4a\x1f\x2a\x87\xa4\xaf\x62"
- "\x8d\x7d\x58\xf6\x43\x35\xfc\x53"
- "\x8f\x1a\xf6\x12\xe1\x13\x3f\x66"
- "\x91\x4b\x13\xd6\x45\xfb\xb0\x7a"
- "\xe0\x8b\x8e\x99\xf7\x86\x46\x37"
- "\xd1\x22\x9e\x52\xf3\x3f\xd9\x75"
- "\x2c\x2c\xc6\xbb\x0e\x08\x14\x29"
- "\xe8\x50\x2f\xd8\xbe\xf4\xe9\x69"
- "\x4a\xee\xf7\xae\x15\x65\x35\x1e",
- .digest = "\x00\x4f\x5d\xe9\x3b\xc0\xd6\x50"
- "\x3e\x38\x73\x86\xc6\xda\xca\x7f",
- .psize = 112,
- .ksize = 16,
- },
- { // Random ( 1)
- .key = "\x37\xbe\x68\x16\x50\xb9\x4e\xb0"
- "\x47\xde\xe2\xbd\xde\xe4\x48\x09",
- .plaintext = "\x87\xfc\x68\x9f\xff\xf2\x4a\x1e"
- "\x82\x3b\x73\x8f\xc1\xb2\x1b\x7a"
- "\x6c\x4f\x81\xbc\x88\x9b\x6c\xa3"
- "\x9c\xc2\xa5\xbc\x14\x70\x4c\x9b"
- "\x0c\x9f\x59\x92\x16\x4b\x91\x3d"
- "\x18\x55\x22\x68\x12\x8c\x63\xb2"
- "\x51\xcb\x85\x4b\xd2\xae\x0b\x1c"
- "\x5d\x28\x9d\x1d\xb1\xc8\xf0\x77"
- "\xe9\xb5\x07\x4e\x06\xc8\xee\xf8"
- "\x1b\xed\x72\x2a\x55\x7d\x16\xc9"
- "\xf2\x54\xe7\xe9\xe0\x44\x5b\x33"
- "\xb1\x49\xee\xff\x43\xfb\x82\xcd"
- "\x4a\x70\x78\x81\xa4\x34\x36\xe8"
- "\x4c\x28\x54\xa6\x6c\xc3\x6b\x78"
- "\xe7\xc0\x5d\xc6\x5d\x81\xab\x70"
- "\x08\x86\xa1\xfd\xf4\x77\x55\xfd"
- "\xa3\xe9\xe2\x1b\xdf\x99\xb7\x80"
- "\xf9\x0a\x4f\x72\x4a\xd3\xaf\xbb"
- "\xb3\x3b\xeb\x08\x58\x0f\x79\xce"
- "\xa5\x99\x05\x12\x34\xd4\xf4\x86"
- "\x37\x23\x1d\xc8\x49\xc0\x92\xae"
- "\xa6\xac\x9b\x31\x55\xed\x15\xc6"
- "\x05\x17\x37\x8d\x90\x42\xe4\x87"
- "\x89\x62\x88\x69\x1c\x6a\xfd\xe3"
- "\x00\x2b\x47\x1a\x73\xc1\x51\xc2"
- "\xc0\x62\x74\x6a\x9e\xb2\xe5\x21"
- "\xbe\x90\xb5\xb0\x50\xca\x88\x68"
- "\xe1\x9d\x7a\xdf\x6c\xb7\xb9\x98"
- "\xee\x28\x62\x61\x8b\xd1\x47\xf9"
- "\x04\x7a\x0b\x5d\xcd\x2b\x65\xf5"
- "\x12\xa3\xfe\x1a\xaa\x2c\x78\x42"
- "\xb8\xbe\x7d\x74\xeb\x59\xba\xba",
- .digest = "\xae\x11\xd4\x60\x2a\x5f\x9e\x42"
- "\x89\x04\xc2\x34\x8d\x55\x94\x0a",
- .psize = 256,
- .ksize = 16,
- },
-
-};
-
-/*
- * Test vectors generated using https://github.com/google/hctr2
*/
static const struct cipher_testvec aes_hctr2_tv_template[] = {
{
@@ -38533,4 +36921,355 @@ static const struct cipher_testvec aes_hctr2_tv_template[] = {
};
+#ifdef __LITTLE_ENDIAN
+#define AUTHENC_KEY_HEADER(enckeylen) \
+ "\x08\x00\x01\x00" /* LE rtattr */ \
+ enckeylen /* crypto_authenc_key_param */
+#else
+#define AUTHENC_KEY_HEADER(enckeylen) \
+ "\x00\x08\x00\x01" /* BE rtattr */ \
+ enckeylen /* crypto_authenc_key_param */
+#endif
+
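For reference, the authenc() key blob these vectors encode is: a 4-byte
rtattr header in host endianness (len = 8, type = 1), a big-endian 4-byte
enckeylen, then the authentication key (Ki) followed by the encryption key
(Ke), which is why klen = 4 + 4 + ksize(Ki) + ksize(Ke) below. A minimal
user-space sketch of packing such a blob (illustration only; the helper
name is hypothetical):

	#include <stdint.h>
	#include <string.h>

	static size_t authenc_pack_key(uint8_t *out,
				       const uint8_t *authkey, size_t authkeylen,
				       const uint8_t *enckey, size_t enckeylen)
	{
		uint8_t *p = out;
		uint16_t rta_len = 8, rta_type = 1;	/* header + 4-byte param */

		memcpy(p, &rta_len, 2);  p += 2;	/* host-endian, as in the macro */
		memcpy(p, &rta_type, 2); p += 2;
		p[0] = enckeylen >> 24;			/* enckeylen is big-endian */
		p[1] = enckeylen >> 16;
		p[2] = enckeylen >> 8;
		p[3] = enckeylen;
		p += 4;
		memcpy(p, authkey, authkeylen); p += authkeylen;
		memcpy(p, enckey, enckeylen);   p += enckeylen;
		return p - out;		/* == 4 + 4 + authkeylen + enckeylen */
	}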
+static const struct aead_testvec krb5_test_aes128_cts_hmac_sha256_128[] = {
+ /* rfc8009 Appendix A */
+ {
+ /* "enc no plain" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x7E\x58\x95\xEA\xF2\x67\x24\x35\xBA\xD8\x17\xF5\x45\xA3\x71\x48" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\xEF\x85\xFB\x89\x0B\xB8\x47\x2F\x4D\xAB\x20\x39\x4D\xCA\x78\x1D"
+ "\xAD\x87\x7E\xDA\x39\xD5\x0C\x87\x0C\x0D\x5A\x0A\x8E\x48\xC7\x18",
+ .clen = 16 + 0 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain<block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x7B\xCA\x28\x5E\x2F\xD4\x13\x0F\xB5\x5B\x1A\x5C\x83\xBC\x5B\x24" // Confounder
+ "\x00\x01\x02\x03\x04\x05", // Plain
+ .plen = 16 + 6,
+ .ctext =
+ "\x84\xD7\xF3\x07\x54\xED\x98\x7B\xAB\x0B\xF3\x50\x6B\xEB\x09\xCF"
+ "\xB5\x54\x02\xCE\xF7\xE6\x87\x7C\xE9\x9E\x24\x7E\x52\xD1\x6E\xD4"
+ "\x42\x1D\xFD\xF8\x97\x6C",
+ .clen = 16 + 6 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain==block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x56\xAB\x21\x71\x3F\xF6\x2C\x0A\x14\x57\x20\x0F\x6F\xA9\x94\x8F" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain
+ .plen = 16 + 16,
+ .ctext =
+ "\x35\x17\xD6\x40\xF5\x0D\xDC\x8A\xD3\x62\x87\x22\xB3\x56\x9D\x2A"
+ "\xE0\x74\x93\xFA\x82\x63\x25\x40\x80\xEA\x65\xC1\x00\x8E\x8F\xC2"
+ "\x95\xFB\x48\x52\xE7\xD8\x3E\x1E\x7C\x48\xC3\x7E\xEB\xE6\xB0\xD3",
+ .clen = 16 + 16 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain>block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\xA7\xA4\xE2\x9A\x47\x28\xCE\x10\x66\x4F\xB6\x4E\x49\xAD\x3F\xAC" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+ "\x10\x11\x12\x13\x14", // Plain
+ .plen = 16 + 21,
+ .ctext =
+ "\x72\x0F\x73\xB1\x8D\x98\x59\xCD\x6C\xCB\x43\x46\x11\x5C\xD3\x36"
+ "\xC7\x0F\x58\xED\xC0\xC4\x43\x7C\x55\x73\x54\x4C\x31\xC8\x13\xBC"
+ "\xE1\xE6\xD0\x72\xC1\x86\xB3\x9A\x41\x3C\x2F\x92\xCA\x9B\x83\x34"
+ "\xA2\x87\xFF\xCB\xFC",
+ .clen = 16 + 21 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ },
+};
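The length fields above follow one pattern throughout: ptext is a 16-byte
confounder plus the message, and ctext is the same length plus the
truncated HMAC (16 bytes for this encryption type). A throwaway check of
that arithmetic (hypothetical, not part of testmgr):

	#include <assert.h>

	static void check_krb5_lengths(unsigned int plen, unsigned int clen,
				       unsigned int msglen, unsigned int cksumlen)
	{
		assert(plen == 16 + msglen);		/* confounder + message */
		assert(clen == plen + cksumlen);	/* ciphertext + checksum */
	}

	/* e.g. check_krb5_lengths(16 + 6, 16 + 6 + 16, 6, 16) for "enc plain<block" */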
+
+static const struct aead_testvec krb5_test_aes256_cts_hmac_sha384_192[] = {
+ /* rfc8009 Appendix A */
+ {
+ /* "enc no plain" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\xF7\x64\xE9\xFA\x15\xC2\x76\x47\x8B\x2C\x7D\x0C\x4E\x5F\x58\xE4" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\x41\xF5\x3F\xA5\xBF\xE7\x02\x6D\x91\xFA\xF9\xBE\x95\x91\x95\xA0"
+ "\x58\x70\x72\x73\xA9\x6A\x40\xF0\xA0\x19\x60\x62\x1A\xC6\x12\x74"
+ "\x8B\x9B\xBF\xBE\x7E\xB4\xCE\x3C",
+ .clen = 16 + 0 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain<block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\xB8\x0D\x32\x51\xC1\xF6\x47\x14\x94\x25\x6F\xFE\x71\x2D\x0B\x9A" // Confounder
+ "\x00\x01\x02\x03\x04\x05", // Plain
+ .plen = 16 + 6,
+ .ctext =
+ "\x4E\xD7\xB3\x7C\x2B\xCA\xC8\xF7\x4F\x23\xC1\xCF\x07\xE6\x2B\xC7"
+ "\xB7\x5F\xB3\xF6\x37\xB9\xF5\x59\xC7\xF6\x64\xF6\x9E\xAB\x7B\x60"
+ "\x92\x23\x75\x26\xEA\x0D\x1F\x61\xCB\x20\xD6\x9D\x10\xF2",
+ .clen = 16 + 6 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain==block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\x53\xBF\x8A\x0D\x10\x52\x65\xD4\xE2\x76\x42\x86\x24\xCE\x5E\x63" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain
+ .plen = 16 + 16,
+ .ctext =
+ "\xBC\x47\xFF\xEC\x79\x98\xEB\x91\xE8\x11\x5C\xF8\xD1\x9D\xAC\x4B"
+ "\xBB\xE2\xE1\x63\xE8\x7D\xD3\x7F\x49\xBE\xCA\x92\x02\x77\x64\xF6"
+ "\x8C\xF5\x1F\x14\xD7\x98\xC2\x27\x3F\x35\xDF\x57\x4D\x1F\x93\x2E"
+ "\x40\xC4\xFF\x25\x5B\x36\xA2\x66",
+ .clen = 16 + 16 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain>block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\x76\x3E\x65\x36\x7E\x86\x4F\x02\xF5\x51\x53\xC7\xE3\xB5\x8A\xF1" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+ "\x10\x11\x12\x13\x14", // Plain
+ .plen = 16 + 21,
+ .ctext =
+ "\x40\x01\x3E\x2D\xF5\x8E\x87\x51\x95\x7D\x28\x78\xBC\xD2\xD6\xFE"
+ "\x10\x1C\xCF\xD5\x56\xCB\x1E\xAE\x79\xDB\x3C\x3E\xE8\x64\x29\xF2"
+ "\xB2\xA6\x02\xAC\x86\xFE\xF6\xEC\xB6\x47\xD6\x29\x5F\xAE\x07\x7A"
+ "\x1F\xEB\x51\x75\x08\xD2\xC1\x6B\x41\x92\xE0\x1F\x62",
+ .clen = 16 + 21 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ },
+};
+
+static const struct aead_testvec krb5_test_camellia_cts_cmac[] = {
+ /* rfc6803 sec 10 */
+ {
+ // "enc no plain"
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x45\xeb\x66\xe2\xef\xa8\x77\x8f\x7d\xf1\x46\x54\x53\x05\x98\x06" // Ki
+ "\xe9\x9b\x82\xb3\x6c\x4a\xe8\xea\x19\xe9\x5d\xfa\x9e\xde\x88\x2c", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xB6\x98\x22\xA1\x9A\x6B\x09\xC0\xEB\xC8\x55\x7D\x1F\x1B\x6C\x0A" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\xC4\x66\xF1\x87\x10\x69\x92\x1E\xDB\x7C\x6F\xDE\x24\x4A\x52\xDB"
+ "\x0B\xA1\x0E\xDC\x19\x7B\xDB\x80\x06\x65\x8C\xA3\xCC\xCE\x6E\xB8",
+ .clen = 16 + 0 + 16,
+ }, {
+ // "enc 1 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x13\x5f\xe7\x11\x6f\x53\xc2\xaa\x36\x12\xb7\xea\xe0\xf2\x84\xaa" // Ki
+ "\xa7\xed\xcd\x53\x97\xea\x6d\x12\xb0\xaf\xf4\xcb\x8d\xaa\x57\xad", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\x6F\x2F\xC3\xC2\xA1\x66\xFD\x88\x98\x96\x7A\x83\xDE\x95\x96\xD9" // Confounder
+ "1", // Plain
+ .plen = 16 + 1,
+ .ctext =
+ "\x84\x2D\x21\xFD\x95\x03\x11\xC0\xDD\x46\x4A\x3F\x4B\xE8\xD6\xDA"
+ "\x88\xA5\x6D\x55\x9C\x9B\x47\xD3\xF9\xA8\x50\x67\xAF\x66\x15\x59"
+ "\xB8",
+ .clen = 16 + 1 + 16,
+ }, {
+ // "enc 9 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x10\x2c\x34\xd0\x75\x74\x9f\x77\x8a\x15\xca\xd1\xe9\x7d\xa9\x86" // Ki
+ "\xdd\xe4\x2e\xca\x7c\xd9\x86\x3f\xc3\xce\x89\xcb\xc9\x43\x62\xd7", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xA5\xB4\xA7\x1E\x07\x7A\xEE\xF9\x3C\x87\x63\xC1\x8F\xDB\x1F\x10" // Confounder
+ "9 bytesss", // Plain
+ .plen = 16 + 9,
+ .ctext =
+ "\x61\x9F\xF0\x72\xE3\x62\x86\xFF\x0A\x28\xDE\xB3\xA3\x52\xEC\x0D"
+ "\x0E\xDF\x5C\x51\x60\xD6\x63\xC9\x01\x75\x8C\xCF\x9D\x1E\xD3\x3D"
+ "\x71\xDB\x8F\x23\xAA\xBF\x83\x48\xA0",
+ .clen = 16 + 9 + 16,
+ }, {
+ // "enc 13 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\xb8\xc4\x38\xcc\x1a\x00\x60\xfc\x91\x3a\x8e\x07\x16\x96\xbd\x08" // Ki
+ "\xc3\x11\x3a\x25\x85\x90\xb9\xae\xbf\x72\x1b\x1a\xf6\xb0\xcb\xf8", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\x19\xFE\xE4\x0D\x81\x0C\x52\x4B\x5B\x22\xF0\x18\x74\xC6\x93\xDA" // Confounder
+ "13 bytes byte", // Plain
+ .plen = 16 + 13,
+ .ctext =
+ "\xB8\xEC\xA3\x16\x7A\xE6\x31\x55\x12\xE5\x9F\x98\xA7\xC5\x00\x20"
+ "\x5E\x5F\x63\xFF\x3B\xB3\x89\xAF\x1C\x41\xA2\x1D\x64\x0D\x86\x15"
+ "\xC9\xED\x3F\xBE\xB0\x5A\xB6\xAC\xB6\x76\x89\xB5\xEA",
+ .clen = 16 + 13 + 16,
+ }, {
+ // "enc 30 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x18\xaf\x19\xb0\x23\x74\x44\xfd\x75\x04\xad\x7d\xbd\x48\xad\xd3" // Ki
+ "\x8b\x07\xee\xd3\x01\x49\x91\x6a\xa2\x0d\xb3\xf5\xce\xd8\xaf\xad", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xCA\x7A\x7A\xB4\xBE\x19\x2D\xAB\xD6\x03\x50\x6D\xB1\x9C\x39\xE2" // Confounder
+ "30 bytes bytes bytes bytes byt", // Plain
+ .plen = 16 + 30,
+ .ctext =
+ "\xA2\x6A\x39\x05\xA4\xFF\xD5\x81\x6B\x7B\x1E\x27\x38\x0D\x08\x09"
+ "\x0C\x8E\xC1\xF3\x04\x49\x6E\x1A\xBD\xCD\x2B\xDC\xD1\xDF\xFC\x66"
+ "\x09\x89\xE1\x17\xA7\x13\xDD\xBB\x57\xA4\x14\x6C\x15\x87\xCB\xA4"
+ "\x35\x66\x65\x59\x1D\x22\x40\x28\x2F\x58\x42\xB1\x05\xA5",
+ .clen = 16 + 30 + 16,
+ }, {
+ // "enc no plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\xa2\xb8\x33\xe9\x43\xbb\x10\xee\x53\xb4\xa1\x9b\xc2\xbb\xc7\xe1"
+ "\x9b\x87\xad\x5d\xe9\x21\x22\xa4\x33\x8b\xe6\xf7\x32\xfd\x8a\x0e" // Ki
+ "\x6c\xcb\x3f\x25\xd8\xae\x57\xf4\xe8\xf6\xca\x47\x4b\xdd\xef\xf1"
+ "\x16\xce\x13\x1b\x3f\x71\x01\x2e\x75\x6d\x6b\x1e\x3f\x70\xa7\xf1", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\x3C\xBB\xD2\xB4\x59\x17\x94\x10\x67\xF9\x65\x99\xBB\x98\x92\x6C" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\x03\x88\x6D\x03\x31\x0B\x47\xA6\xD8\xF0\x6D\x7B\x94\xD1\xDD\x83"
+ "\x7E\xCC\xE3\x15\xEF\x65\x2A\xFF\x62\x08\x59\xD9\x4A\x25\x92\x66",
+ .clen = 16 + 0 + 16,
+ }, {
+ // "enc 1 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x84\x61\x4b\xfa\x98\xf1\x74\x8a\xa4\xaf\x99\x2b\x8c\x26\x28\x0d"
+ "\xc8\x98\x73\x29\xdf\x77\x5c\x1d\xb0\x4a\x43\xf1\x21\xaa\x86\x65" // Ki
+ "\xe9\x31\x73\xaa\x01\xeb\x3c\x24\x62\x31\xda\xfc\x78\x02\xee\x32"
+ "\xaf\x24\x85\x1d\x8c\x73\x87\xd1\x8c\xb9\xb2\xc5\xb7\xf5\x70\xb8", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xDE\xF4\x87\xFC\xEB\xE6\xDE\x63\x46\xD4\xDA\x45\x21\xBB\xA2\xD2" // Confounder
+ "1", // Plain
+ .plen = 16 + 1,
+ .ctext =
+ "\x2C\x9C\x15\x70\x13\x3C\x99\xBF\x6A\x34\xBC\x1B\x02\x12\x00\x2F"
+ "\xD1\x94\x33\x87\x49\xDB\x41\x35\x49\x7A\x34\x7C\xFC\xD9\xD1\x8A"
+ "\x12",
+ .clen = 16 + 1 + 16,
+ }, {
+ // "enc 9 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x47\xb9\xf5\xba\xd7\x63\x00\x58\x2a\x54\x45\xfa\x0c\x1b\x29\xc3"
+ "\xaa\x83\xec\x63\xb9\x0b\x4a\xb0\x08\x48\xc1\x85\x67\x4f\x44\xa7" // Ki
+ "\xcd\xa2\xd3\x9a\x9b\x24\x3f\xfe\xb5\x6e\x8d\x5f\x4b\xd5\x28\x74"
+ "\x1e\xcb\x52\x0c\x62\x12\x3f\xb0\x40\xb8\x41\x8b\x15\xc7\xd7\x0c", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xAD\x4F\xF9\x04\xD3\x4E\x55\x53\x84\xB1\x41\x00\xFC\x46\x5F\x88" // Confounder
+ "9 bytesss", // Plain
+ .plen = 16 + 9,
+ .ctext =
+ "\x9C\x6D\xE7\x5F\x81\x2D\xE7\xED\x0D\x28\xB2\x96\x35\x57\xA1\x15"
+ "\x64\x09\x98\x27\x5B\x0A\xF5\x15\x27\x09\x91\x3F\xF5\x2A\x2A\x9C"
+ "\x8E\x63\xB8\x72\xF9\x2E\x64\xC8\x39",
+ .clen = 16 + 9 + 16,
+ }, {
+ // "enc 13 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x15\x2f\x8c\x9d\xc9\x85\x79\x6e\xb1\x94\xed\x14\xc5\x9e\xac\xdd"
+ "\x41\x8a\x33\x32\x36\xb7\x8f\xaf\xa7\xc7\x9b\x04\xe0\xac\xe7\xbf" // Ki
+ "\xcd\x8a\x10\xe2\x79\xda\xdd\xb6\x90\x1e\xc3\x0b\xdf\x98\x73\x25"
+ "\x0f\x6e\xfc\x6a\x77\x36\x7d\x74\xdc\x3e\xe7\xf7\x4b\xc7\x77\x4e", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xCF\x9B\xCA\x6D\xF1\x14\x4E\x0C\x0A\xF9\xB8\xF3\x4C\x90\xD5\x14" // Confounder
+ "13 bytes byte",
+ .plen = 16 + 13,
+ .ctext =
+ "\xEE\xEC\x85\xA9\x81\x3C\xDC\x53\x67\x72\xAB\x9B\x42\xDE\xFC\x57"
+ "\x06\xF7\x26\xE9\x75\xDD\xE0\x5A\x87\xEB\x54\x06\xEA\x32\x4C\xA1"
+ "\x85\xC9\x98\x6B\x42\xAA\xBE\x79\x4B\x84\x82\x1B\xEE",
+ .clen = 16 + 13 + 16,
+ }, {
+ // "enc 30 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x04\x8d\xeb\xf7\xb1\x2c\x09\x32\xe8\xb2\x96\x99\x6c\x23\xf8\xb7"
+ "\x9d\x59\xb9\x7e\xa1\x19\xfc\x0c\x15\x6b\xf7\x88\xdc\x8c\x85\xe8" // Ki
+ "\x1d\x51\x47\xf3\x4b\xb0\x01\xa0\x4a\x68\xa7\x13\x46\xe7\x65\x4e"
+ "\x02\x23\xa6\x0d\x90\xbc\x2b\x79\xb4\xd8\x79\x56\xd4\x7c\xd4\x2a", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\x64\x4D\xEF\x38\xDA\x35\x00\x72\x75\x87\x8D\x21\x68\x55\xE2\x28" // Confounder
+ "30 bytes bytes bytes bytes byt", // Plain
+ .plen = 16 + 30,
+ .ctext =
+ "\x0E\x44\x68\x09\x85\x85\x5F\x2D\x1F\x18\x12\x52\x9C\xA8\x3B\xFD"
+ "\x8E\x34\x9D\xE6\xFD\x9A\xDA\x0B\xAA\xA0\x48\xD6\x8E\x26\x5F\xEB"
+ "\xF3\x4A\xD1\x25\x5A\x34\x49\x99\xAD\x37\x14\x68\x87\xA6\xC6\x84"
+ "\x57\x31\xAC\x7F\x46\x37\x6A\x05\x04\xCD\x06\x57\x14\x74",
+ .clen = 16 + 30 + 16,
+ },
+};
+
#endif /* _CRYPTO_TESTMGR_H */
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index f921f30334f4..bf4f28742f77 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -25,9 +25,9 @@
* Third Edition.
*/
+#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/bitops.h>
-#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 86b2f067a416..368018cfa9bf 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -24,13 +24,13 @@
* Third Edition.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
#include <linux/bitops.h>
/* Macros to compute the g() function in the encryption and decryption
@@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(twofish_mod_init);
+module_init(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/vmac.c b/crypto/vmac.c
deleted file mode 100644
index 4633b2dda1e0..000000000000
--- a/crypto/vmac.c
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
- * VMAC: Message Authentication Code using Universal Hashing
- *
- * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
- *
- * Copyright (c) 2009, Intel Corporation.
- * Copyright (c) 2018, Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-/*
- * Derived from:
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is hereby placed in the public domain.
- * The authors offer no warranty. Use at your own risk.
- * Last modified: 17 APR 08, 1700 PDT
- */
-
-#include <asm/unaligned.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <asm/byteorder.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES	128 /* Must be 2^i for some 3 < i < 13; standard = 128 */
-#define VMAC_NONCEBYTES 16
-
-/* per-transform (per-key) context */
-struct vmac_tfm_ctx {
- struct crypto_cipher *cipher;
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
-};
-
-/* per-request context */
-struct vmac_desc_ctx {
- union {
- u8 partial[VMAC_NHBYTES]; /* partial block */
- __le64 partial_words[VMAC_NHBYTES / 8];
- };
- unsigned int partial_size; /* size of the partial block */
- bool first_block_processed;
- u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
- union {
- u8 bytes[VMAC_NONCEBYTES];
- __be64 pads[VMAC_NONCEBYTES / 8];
- } nonce;
- unsigned int nonce_size; /* nonce bytes filled so far */
-};
-
-/*
- * Constants and masks
- */
-#define UINT64_C(x) x##ULL
-static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
-static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
-static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
-static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
-static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
-
-#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
-
-#ifdef __LITTLE_ENDIAN
-#define INDEX_HIGH 1
-#define INDEX_LOW 0
-#else
-#define INDEX_HIGH 0
-#define INDEX_LOW 1
-#endif
-
-/*
- * The following routines are used in this implementation. They are
- * written via macros to simulate zero-overhead call-by-reference.
- *
- * MUL64: 64x64->128-bit multiplication
- * PMUL64: assumes top bits cleared on inputs
- * ADD128: 128x128->128-bit addition
- */
-
-#define ADD128(rh, rl, ih, il) \
- do { \
- u64 _il = (il); \
- (rl) += (_il); \
- if ((rl) < (_il)) \
- (rh)++; \
- (rh) += (ih); \
- } while (0)
-
-#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
-
-#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
- do { \
- u64 _i1 = (i1), _i2 = (i2); \
- u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
- rh = MUL32(_i1>>32, _i2>>32); \
- rl = MUL32(_i1, _i2); \
- ADD128(rh, rl, (m >> 32), (m << 32)); \
- } while (0)
-
-#define MUL64(rh, rl, i1, i2) \
- do { \
- u64 _i1 = (i1), _i2 = (i2); \
- u64 m1 = MUL32(_i1, _i2>>32); \
- u64 m2 = MUL32(_i1>>32, _i2); \
- rh = MUL32(_i1>>32, _i2>>32); \
- rl = MUL32(_i1, _i2); \
- ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
- ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
- } while (0)
-
-/*
- * For highest performance the L1 NH and L2 polynomial hashes should be
- * carefully implemented to take advantage of one's target architecture.
- * Here these two hash functions are defined multiple times: once for
- * 64-bit architectures, once for 32-bit SSE2 architectures, and once
- * for the remaining (32-bit) architectures.
- * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
- * Optionally, nh_vmac_nhbytes can be defined (for multiples of
- * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
- * NH computations at once).
- */
-
-#ifdef CONFIG_64BIT
-
-#define nh_16(mp, kp, nw, rh, rl) \
- do { \
- int i; u64 th, tl; \
- rh = rl = 0; \
- for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- } \
- } while (0)
-
-#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
- do { \
- int i; u64 th, tl; \
- rh1 = rl1 = rh = rl = 0; \
- for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
- ADD128(rh1, rl1, th, tl); \
- } \
- } while (0)
-
-#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
- do { \
- int i; u64 th, tl; \
- rh = rl = 0; \
- for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
- ADD128(rh, rl, th, tl); \
- } \
- } while (0)
-
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
- do { \
- int i; u64 th, tl; \
- rh1 = rl1 = rh = rl = 0; \
- for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
- ADD128(rh1, rl1, th, tl); \
- } \
- } while (0)
-#endif
-
-#define poly_step(ah, al, kh, kl, mh, ml) \
- do { \
- u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
- /* compute ab*cd, put bd into result registers */ \
- PMUL64(t3h, t3l, al, kh); \
- PMUL64(t2h, t2l, ah, kl); \
- PMUL64(t1h, t1l, ah, 2*kh); \
- PMUL64(ah, al, al, kl); \
- /* add 2 * ac to result */ \
- ADD128(ah, al, t1h, t1l); \
- /* add together ad + bc */ \
- ADD128(t2h, t2l, t3h, t3l); \
- /* now (ah,al), (t2l,2*t2h) need summing */ \
- /* first add the high registers, carrying into t2h */ \
- ADD128(t2h, ah, z, t2l); \
- /* double t2h and add top bit of ah */ \
- t2h = 2 * t2h + (ah >> 63); \
- ah &= m63; \
- /* now add the low registers */ \
- ADD128(ah, al, mh, ml); \
- ADD128(ah, al, z, t2h); \
- } while (0)
-
-#else /* ! CONFIG_64BIT */
-
-#ifndef nh_16
-#define nh_16(mp, kp, nw, rh, rl) \
- do { \
- u64 t1, t2, m1, m2, t; \
- int i; \
- rh = rl = t = 0; \
- for (i = 0; i < nw; i += 2) { \
- t1 = pe64_to_cpup(mp+i) + kp[i]; \
- t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
- m2 = MUL32(t1 >> 32, t2); \
- m1 = MUL32(t1, t2 >> 32); \
- ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
- MUL32(t1, t2)); \
- rh += (u64)(u32)(m1 >> 32) \
- + (u32)(m2 >> 32); \
- t += (u64)(u32)m1 + (u32)m2; \
- } \
- ADD128(rh, rl, (t >> 32), (t << 32)); \
- } while (0)
-#endif
-
-static void poly_step_func(u64 *ahi, u64 *alo,
- const u64 *kh, const u64 *kl,
- const u64 *mh, const u64 *ml)
-{
-#define a0 (*(((u32 *)alo)+INDEX_LOW))
-#define a1 (*(((u32 *)alo)+INDEX_HIGH))
-#define a2 (*(((u32 *)ahi)+INDEX_LOW))
-#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
-#define k0 (*(((u32 *)kl)+INDEX_LOW))
-#define k1 (*(((u32 *)kl)+INDEX_HIGH))
-#define k2 (*(((u32 *)kh)+INDEX_LOW))
-#define k3 (*(((u32 *)kh)+INDEX_HIGH))
-
- u64 p, q, t;
- u32 t2;
-
- p = MUL32(a3, k3);
- p += p;
- p += *(u64 *)mh;
- p += MUL32(a0, k2);
- p += MUL32(a1, k1);
- p += MUL32(a2, k0);
- t = (u32)(p);
- p >>= 32;
- p += MUL32(a0, k3);
- p += MUL32(a1, k2);
- p += MUL32(a2, k1);
- p += MUL32(a3, k0);
- t |= ((u64)((u32)p & 0x7fffffff)) << 32;
- p >>= 31;
- p += (u64)(((u32 *)ml)[INDEX_LOW]);
- p += MUL32(a0, k0);
- q = MUL32(a1, k3);
- q += MUL32(a2, k2);
- q += MUL32(a3, k1);
- q += q;
- p += q;
- t2 = (u32)(p);
- p >>= 32;
- p += (u64)(((u32 *)ml)[INDEX_HIGH]);
- p += MUL32(a0, k1);
- p += MUL32(a1, k0);
- q = MUL32(a2, k3);
- q += MUL32(a3, k2);
- q += q;
- p += q;
- *(u64 *)(alo) = (p << 32) | t2;
- p >>= 32;
- *(u64 *)(ahi) = p + t;
-
-#undef a0
-#undef a1
-#undef a2
-#undef a3
-#undef k0
-#undef k1
-#undef k2
-#undef k3
-}
-
-#define poly_step(ah, al, kh, kl, mh, ml) \
- poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
-
-#endif /* end of specialized NH and poly definitions */
-
-/* At least nh_16 is defined. Define others as needed here. */
-#ifndef nh_16_2
-#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
- do { \
- nh_16(mp, kp, nw, rh, rl); \
- nh_16(mp, ((kp)+2), nw, rh2, rl2); \
- } while (0)
-#endif
-#ifndef nh_vmac_nhbytes
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
- nh_16(mp, kp, nw, rh, rl)
-#endif
-#ifndef nh_vmac_nhbytes_2
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
- do { \
- nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
- nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
- } while (0)
-#endif
-
-static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
-{
- u64 rh, rl, t, z = 0;
-
- /* fully reduce (p1,p2)+(len,0) mod p127 */
- t = p1 >> 63;
- p1 &= m63;
- ADD128(p1, p2, len, t);
- /* At this point, (p1,p2) is at most 2^127+(len<<64) */
- t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
- ADD128(p1, p2, z, t);
- p1 &= m63;
-
- /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
- t = p1 + (p2 >> 32);
- t += (t >> 32);
- t += (u32)t > 0xfffffffeu;
- p1 += (t >> 32);
- p2 += (p1 << 32);
-
- /* compute (p1+k1)%p64 and (p2+k2)%p64 */
- p1 += k1;
- p1 += (0 - (p1 < k1)) & 257;
- p2 += k2;
- p2 += (0 - (p2 < k2)) & 257;
-
- /* compute (p1+k1)*(p2+k2)%p64 */
- MUL64(rh, rl, p1, p2);
- t = rh >> 56;
- ADD128(t, rl, z, rh);
- rh <<= 8;
- ADD128(t, rl, z, rh);
- t += t << 8;
- rl += t;
- rl += (0 - (rl < t)) & 257;
- rl += (0 - (rl > p64-1)) & 257;
- return rl;
-}
-
-/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
-static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
- struct vmac_desc_ctx *dctx,
- const __le64 *mptr, unsigned int blocks)
-{
- const u64 *kptr = tctx->nhkey;
- const u64 pkh = tctx->polykey[0];
- const u64 pkl = tctx->polykey[1];
- u64 ch = dctx->polytmp[0];
- u64 cl = dctx->polytmp[1];
- u64 rh, rl;
-
- if (!dctx->first_block_processed) {
- dctx->first_block_processed = true;
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- ADD128(ch, cl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- blocks--;
- }
-
- while (blocks--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
-
- dctx->polytmp[0] = ch;
- dctx->polytmp[1] = cl;
-}
-
-static int vmac_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- __be64 out[2];
- u8 in[16] = { 0 };
- unsigned int i;
- int err;
-
- if (keylen != VMAC_KEY_LEN)
- return -EINVAL;
-
- err = crypto_cipher_setkey(tctx->cipher, key, keylen);
- if (err)
- return err;
-
- /* Fill nh key */
- in[0] = 0x80;
- for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->nhkey[i] = be64_to_cpu(out[0]);
- tctx->nhkey[i+1] = be64_to_cpu(out[1]);
- in[15]++;
- }
-
- /* Fill poly key */
- in[0] = 0xC0;
- in[15] = 0;
- for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
- tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
- in[15]++;
- }
-
- /* Fill ip key */
- in[0] = 0xE0;
- in[15] = 0;
- for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
- do {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->l3key[i] = be64_to_cpu(out[0]);
- tctx->l3key[i+1] = be64_to_cpu(out[1]);
- in[15]++;
- } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
- }
-
- return 0;
-}
-
-static int vmac_init(struct shash_desc *desc)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->partial_size = 0;
- dctx->first_block_processed = false;
- memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
- dctx->nonce_size = 0;
- return 0;
-}
-
-static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int n;
-
- /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
- if (dctx->nonce_size < VMAC_NONCEBYTES) {
- n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
- memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
- dctx->nonce_size += n;
- p += n;
- len -= n;
- }
-
- if (dctx->partial_size) {
- n = min(len, VMAC_NHBYTES - dctx->partial_size);
- memcpy(&dctx->partial[dctx->partial_size], p, n);
- dctx->partial_size += n;
- p += n;
- len -= n;
- if (dctx->partial_size == VMAC_NHBYTES) {
- vhash_blocks(tctx, dctx, dctx->partial_words, 1);
- dctx->partial_size = 0;
- }
- }
-
- if (len >= VMAC_NHBYTES) {
- n = round_down(len, VMAC_NHBYTES);
- /* TODO: 'p' may be misaligned here */
- vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
- p += n;
- len -= n;
- }
-
- if (len) {
- memcpy(dctx->partial, p, len);
- dctx->partial_size = len;
- }
-
- return 0;
-}
-
-static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
- struct vmac_desc_ctx *dctx)
-{
- unsigned int partial = dctx->partial_size;
- u64 ch = dctx->polytmp[0];
- u64 cl = dctx->polytmp[1];
-
- /* L1 and L2-hash the final block if needed */
- if (partial) {
- /* Zero-pad to next 128-bit boundary */
- unsigned int n = round_up(partial, 16);
- u64 rh, rl;
-
- memset(&dctx->partial[partial], 0, n - partial);
- nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
- rh &= m62;
- if (dctx->first_block_processed)
- poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
- rh, rl);
- else
- ADD128(ch, cl, rh, rl);
- }
-
- /* L3-hash the 128-bit output of L2-hash */
- return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
-}
-
-static int vmac_final(struct shash_desc *desc, u8 *out)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- int index;
- u64 hash, pad;
-
- if (dctx->nonce_size != VMAC_NONCEBYTES)
- return -EINVAL;
-
- /*
- * The VMAC specification requires a nonce at least 1 bit shorter than
- * the block cipher's block length, so we actually only accept a 127-bit
- * nonce. We define the unused bit to be the first one and require that
- * it be 0, so the needed prepending of a 0 bit is implicit.
- */
- if (dctx->nonce.bytes[0] & 0x80)
- return -EINVAL;
-
- /* Finish calculating the VHASH of the message */
- hash = vhash_final(tctx, dctx);
-
- /* Generate pseudorandom pad by encrypting the nonce */
- BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
- index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
- dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
- crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
- dctx->nonce.bytes);
- pad = be64_to_cpu(dctx->nonce.pads[index]);
-
- /* The VMAC is the sum of VHASH and the pseudorandom pad */
- put_unaligned_be64(hash + pad, out);
- return 0;
-}
-
-static int vmac_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- tctx->cipher = cipher;
- return 0;
-}
-
-static void vmac_exit_tfm(struct crypto_tfm *tfm)
-{
- struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(tctx->cipher);
-}
-
-static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct shash_instance *inst;
- struct crypto_cipher_spawn *spawn;
- struct crypto_alg *alg;
- u32 mask;
- int err;
-
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
- spawn = shash_instance_ctx(inst);
-
- err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, mask);
- if (err)
- goto err_free_inst;
- alg = crypto_spawn_cipher_alg(spawn);
-
- err = -EINVAL;
- if (alg->cra_blocksize != VMAC_NONCEBYTES)
- goto err_free_inst;
-
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
- if (err)
- goto err_free_inst;
-
- inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
- inst->alg.base.cra_init = vmac_init_tfm;
- inst->alg.base.cra_exit = vmac_exit_tfm;
-
- inst->alg.descsize = sizeof(struct vmac_desc_ctx);
- inst->alg.digestsize = VMAC_TAG_LEN / 8;
- inst->alg.init = vmac_init;
- inst->alg.update = vmac_update;
- inst->alg.final = vmac_final;
- inst->alg.setkey = vmac_setkey;
-
- inst->free = shash_free_singlespawn_instance;
-
- err = shash_register_instance(tmpl, inst);
- if (err) {
-err_free_inst:
- shash_free_singlespawn_instance(inst);
- }
- return err;
-}
-
-static struct crypto_template vmac64_tmpl = {
- .name = "vmac64",
- .create = vmac_create,
- .module = THIS_MODULE,
-};
-
-static int __init vmac_module_init(void)
-{
- return crypto_register_template(&vmac64_tmpl);
-}
-
-static void __exit vmac_module_exit(void)
-{
- crypto_unregister_template(&vmac64_tmpl);
-}
-
-subsys_initcall(vmac_module_init);
-module_exit(vmac_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac64");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
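In outline, the removed template computed tag = VHASH(message) + pad, where
pad is one 64-bit half of the block cipher applied to the nonce with its
low bit cleared, the half chosen by that original low bit. A self-contained
sketch of that tail, with the cipher abstracted behind a callback
(illustration only, not the kernel code):

	#include <stdint.h>
	#include <string.h>

	typedef void (*block_encrypt_fn)(uint8_t out[16], const uint8_t in[16]);

	static uint64_t vmac_tag(uint64_t vhash, const uint8_t nonce[16],
				 block_encrypt_fn encrypt)
	{
		uint8_t buf[16];
		int index = nonce[15] & 1;	/* selects which 64-bit half */
		uint64_t pad = 0;
		int i;

		memcpy(buf, nonce, 16);
		buf[15] &= ~1;			/* clear the selector bit */
		encrypt(buf, buf);
		for (i = 0; i < 8; i++)		/* big-endian load of half 'index' */
			pad = (pad << 8) | buf[8 * index + i];
		return vhash + pad;		/* caller stores this big-endian */
	}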
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 5e820afa3c78..229b189a7988 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -21,10 +21,10 @@
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <asm/byteorder.h>
-#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#define WP512_DIGEST_SIZE 64
#define WP384_DIGEST_SIZE 48
@@ -37,9 +37,6 @@
struct wp512_ctx {
u8 bitLength[WP512_LENGTHBYTES];
- u8 buffer[WP512_BLOCK_SIZE];
- int bufferBits;
- int bufferPos;
u64 hash[WP512_DIGEST_SIZE/8];
};
@@ -779,16 +776,16 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
* The core Whirlpool transform.
*/
-static void wp512_process_buffer(struct wp512_ctx *wctx) {
+static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx,
+ const u8 *buffer) {
int i, r;
u64 K[8]; /* the round key */
u64 block[8]; /* mu(buffer) */
u64 state[8]; /* the cipher state */
u64 L[8];
- const __be64 *buffer = (const __be64 *)wctx->buffer;
for (i = 0; i < 8; i++)
- block[i] = be64_to_cpu(buffer[i]);
+ block[i] = get_unaligned_be64(buffer + i * 8);
state[0] = block[0] ^ (K[0] = wctx->hash[0]);
state[1] = block[1] ^ (K[1] = wctx->hash[1]);
@@ -991,8 +988,6 @@ static int wp512_init(struct shash_desc *desc) {
int i;
memset(wctx->bitLength, 0, 32);
- wctx->bufferBits = wctx->bufferPos = 0;
- wctx->buffer[0] = 0;
for (i = 0; i < 8; i++) {
wctx->hash[i] = 0L;
}
@@ -1000,84 +995,54 @@ static int wp512_init(struct shash_desc *desc) {
return 0;
}
-static int wp512_update(struct shash_desc *desc, const u8 *source,
- unsigned int len)
+static void wp512_add_length(u8 *bitLength, u64 value)
{
- struct wp512_ctx *wctx = shash_desc_ctx(desc);
- int sourcePos = 0;
- unsigned int bits_len = len * 8; // convert to number of bits
- int sourceGap = (8 - ((int)bits_len & 7)) & 7;
- int bufferRem = wctx->bufferBits & 7;
+ u32 carry;
int i;
- u32 b, carry;
- u8 *buffer = wctx->buffer;
- u8 *bitLength = wctx->bitLength;
- int bufferBits = wctx->bufferBits;
- int bufferPos = wctx->bufferPos;
- u64 value = bits_len;
for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) {
carry += bitLength[i] + ((u32)value & 0xff);
bitLength[i] = (u8)carry;
carry >>= 8;
value >>= 8;
}
- while (bits_len > 8) {
- b = ((source[sourcePos] << sourceGap) & 0xff) |
- ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap));
- buffer[bufferPos++] |= (u8)(b >> bufferRem);
- bufferBits += 8 - bufferRem;
- if (bufferBits == WP512_BLOCK_SIZE * 8) {
- wp512_process_buffer(wctx);
- bufferBits = bufferPos = 0;
- }
- buffer[bufferPos] = b << (8 - bufferRem);
- bufferBits += bufferRem;
- bits_len -= 8;
- sourcePos++;
- }
- if (bits_len > 0) {
- b = (source[sourcePos] << sourceGap) & 0xff;
- buffer[bufferPos] |= b >> bufferRem;
- } else {
- b = 0;
- }
- if (bufferRem + bits_len < 8) {
- bufferBits += bits_len;
- } else {
- bufferPos++;
- bufferBits += 8 - bufferRem;
- bits_len -= 8 - bufferRem;
- if (bufferBits == WP512_BLOCK_SIZE * 8) {
- wp512_process_buffer(wctx);
- bufferBits = bufferPos = 0;
- }
- buffer[bufferPos] = b << (8 - bufferRem);
- bufferBits += (int)bits_len;
- }
+}
- wctx->bufferBits = bufferBits;
- wctx->bufferPos = bufferPos;
+static int wp512_update(struct shash_desc *desc, const u8 *source,
+ unsigned int len)
+{
+ struct wp512_ctx *wctx = shash_desc_ctx(desc);
+ unsigned int remain = len % WP512_BLOCK_SIZE;
+ u64 bits_len = (len - remain) * 8ull;
+ u8 *bitLength = wctx->bitLength;
- return 0;
+ wp512_add_length(bitLength, bits_len);
+ do {
+ wp512_process_buffer(wctx, source);
+ source += WP512_BLOCK_SIZE;
+ bits_len -= WP512_BLOCK_SIZE * 8;
+ } while (bits_len);
+
+ return remain;
}
-static int wp512_final(struct shash_desc *desc, u8 *out)
+static int wp512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int bufferPos, u8 *out)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int i;
- u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
- int bufferBits = wctx->bufferBits;
- int bufferPos = wctx->bufferPos;
__be64 *digest = (__be64 *)out;
+ u8 buffer[WP512_BLOCK_SIZE];
- buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
+ wp512_add_length(bitLength, bufferPos * 8);
+ memcpy(buffer, src, bufferPos);
+ buffer[bufferPos] = 0x80U;
bufferPos++;
if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
if (bufferPos < WP512_BLOCK_SIZE)
memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos);
- wp512_process_buffer(wctx);
+ wp512_process_buffer(wctx, buffer);
bufferPos = 0;
}
if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES)
@@ -1086,31 +1051,32 @@ static int wp512_final(struct shash_desc *desc, u8 *out)
bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES;
memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES);
- wp512_process_buffer(wctx);
+ wp512_process_buffer(wctx, buffer);
+ memzero_explicit(buffer, sizeof(buffer));
for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
digest[i] = cpu_to_be64(wctx->hash[i]);
- wctx->bufferBits = bufferBits;
- wctx->bufferPos = bufferPos;
return 0;
}
-static int wp384_final(struct shash_desc *desc, u8 *out)
+static int wp384_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
u8 D[64];
- wp512_final(desc, D);
+ wp512_finup(desc, src, len, D);
memcpy(out, D, WP384_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
return 0;
}
-static int wp256_final(struct shash_desc *desc, u8 *out)
+static int wp256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
u8 D[64];
- wp512_final(desc, D);
+ wp512_finup(desc, src, len, D);
memcpy(out, D, WP256_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
@@ -1121,11 +1087,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP512_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
- .final = wp512_final,
+ .finup = wp512_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
.cra_driver_name = "wp512-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1133,11 +1100,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP384_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
- .final = wp384_final,
+ .finup = wp384_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
.cra_driver_name = "wp384-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1145,11 +1113,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP256_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
- .final = wp256_final,
+ .finup = wp256_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
.cra_driver_name = "wp256-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1169,7 +1138,7 @@ MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
-subsys_initcall(wp512_mod_init);
+module_init(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 6074c5c1da49..6c5f6766fdd6 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -8,9 +8,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
@@ -27,23 +30,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
*/
struct xcbc_tfm_ctx {
struct crypto_cipher *child;
- u8 ctx[];
-};
-
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | xcbc_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct xcbc_desc_ctx {
- unsigned int len;
- u8 ctx[];
+ u8 consts[];
};
#define XCBC_BLOCKSIZE 16
@@ -51,9 +38,8 @@ struct xcbc_desc_ctx {
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
- u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *consts = ctx->consts;
int err = 0;
u8 key1[XCBC_BLOCKSIZE];
int bs = sizeof(key1);
@@ -71,14 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -86,81 +68,37 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
-
- /* clearing the length */
- ctx->len = 0;
+ u8 *prev = shash_desc_ctx(pdesc);
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
- u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len -1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
- crypto_xor(prev, consts + offset, bs);
-
+ crypto_xor(prev, &tctx->consts[offset], bs);
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -191,7 +129,6 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
- unsigned long alignmask;
u32 mask;
int err;
@@ -218,27 +155,22 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alignmask = alg->cra_alignmask | 3;
- inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) +
+ alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
- crypto_tfm_ctx_alignment()) +
- (alignmask &
- ~(crypto_tfm_ctx_alignment() - 1)) +
- alg->cra_blocksize * 2;
-
- inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
- alignmask + 1) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
+
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
inst->alg.init = crypto_xcbc_digest_init;
inst->alg.update = crypto_xcbc_digest_update;
- inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.finup = crypto_xcbc_digest_finup;
inst->alg.setkey = crypto_xcbc_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -267,10 +199,10 @@ static void __exit crypto_xcbc_module_exit(void)
crypto_unregister_template(&crypto_xcbc_tmpl);
}
-subsys_initcall(crypto_xcbc_module_init);
+module_init(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("xcbc");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/xctr.c b/crypto/xctr.c
index 5c00147e8ec4..607ab82cb19b 100644
--- a/crypto/xctr.c
+++ b/crypto/xctr.c
@@ -78,7 +78,7 @@ static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *data = walk->src.virt.addr;
+ u8 *data = walk->dst.virt.addr;
u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
@@ -182,10 +182,10 @@ static void __exit crypto_xctr_module_exit(void)
crypto_unregister_template(&crypto_xctr_tmpl);
}
-subsys_initcall(crypto_xctr_module_init);
+module_init(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("xctr");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/xor.c b/crypto/xor.c
index 8e72e5d5db0d..f39621a57bb3 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -83,33 +83,30 @@ static void __init
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
{
int speed;
- int i, j;
- ktime_t min, start, diff;
+ unsigned long reps;
+ ktime_t min, start, t0;
tmpl->next = template_list;
template_list = tmpl;
preempt_disable();
- min = (ktime_t)S64_MAX;
- for (i = 0; i < 3; i++) {
- start = ktime_get();
- for (j = 0; j < REPS; j++) {
- mb(); /* prevent loop optimization */
- tmpl->do_2(BENCH_SIZE, b1, b2);
- mb();
- }
- diff = ktime_sub(ktime_get(), start);
- if (diff < min)
- min = diff;
- }
+ reps = 0;
+ t0 = ktime_get();
+ /* delay start until time has advanced */
+ while ((start = ktime_get()) == t0)
+ cpu_relax();
+ do {
+ mb(); /* prevent loop optimization */
+ tmpl->do_2(BENCH_SIZE, b1, b2);
+ mb();
+ } while (reps++ < REPS || (t0 = ktime_get()) == start);
+ min = ktime_sub(t0, start);
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
- if (!min)
- min = 1;
- speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
+ speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
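For scale (assuming BENCH_SIZE is one 4096-byte page): if 800 reps complete
in 1,000,000 ns, speed = (1000 * 800 * 4096) / 1000000 ≈ 3276 MB/s.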
@@ -165,6 +162,7 @@ out:
static __exit void xor_exit(void) { }
+MODULE_DESCRIPTION("RAID-5 checksumming functions");
MODULE_LICENSE("GPL");
#ifndef MODULE
diff --git a/crypto/xts.c b/crypto/xts.c
index 63c85b9e64e0..3da8f5e053d6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -28,7 +28,7 @@ struct xts_tfm_ctx {
struct xts_instance_ctx {
struct crypto_skcipher_spawn spawn;
- char name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_cipher_spawn tweak_spawn;
};
struct xts_request_ctx {
@@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
while (w.nbytes) {
unsigned int avail = w.nbytes;
- le128 *wsrc;
+ const le128 *wsrc;
le128 *wdst;
wsrc = w.src.virt.addr;
@@ -140,9 +140,9 @@ static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
return xts_xor_tweak(req, true, enc);
}
-static void xts_cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
le128 b;
if (!err) {
@@ -196,19 +196,19 @@ static int xts_cts_final(struct skcipher_request *req,
return 0;
}
-static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
- rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = xts_xor_tweak_post(req, true);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
err = xts_cts_final(req, crypto_skcipher_encrypt);
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
}
}
@@ -216,19 +216,19 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
- rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = xts_xor_tweak_post(req, false);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
err = xts_cts_final(req, crypto_skcipher_decrypt);
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
}
}
@@ -306,7 +306,7 @@ static int xts_init_tfm(struct crypto_skcipher *tfm)
ctx->child = child;
- tweak = crypto_alloc_cipher(ictx->name, 0, 0);
+ tweak = crypto_spawn_cipher(&ictx->tweak_spawn);
if (IS_ERR(tweak)) {
crypto_free_skcipher(ctx->child);
return PTR_ERR(tweak);
@@ -333,14 +333,16 @@ static void xts_free_instance(struct skcipher_instance *inst)
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ictx->spawn);
+ crypto_drop_cipher(&ictx->tweak_spawn);
kfree(inst);
}
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
+ struct skcipher_alg_common *alg;
+ char name[CRYPTO_MAX_ALG_NAME];
struct skcipher_instance *inst;
struct xts_instance_ctx *ctx;
- struct skcipher_alg *alg;
const char *cipher_name;
u32 mask;
int err;
@@ -361,27 +363,27 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
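+ /* Retry with an "ecb(%s)" wrapper only if the user did not already
+ * name one: memcmp() != 0 means cipher_name does not start with "ecb(".
+ */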
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
- if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
err = crypto_grab_skcipher(&ctx->spawn,
skcipher_crypto_instance(inst),
- ctx->name, 0, mask);
+ name, 0, mask);
}
if (err)
goto err_free_inst;
- alg = crypto_skcipher_spawn_alg(&ctx->spawn);
+ alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
err = -EINVAL;
if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
goto err_free_inst;
- if (crypto_skcipher_alg_ivsize(alg))
+ if (alg->ivsize)
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
@@ -395,34 +397,39 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
- unsigned len;
+ if (!memcmp(cipher_name, "ecb(", 4)) {
+ int len;
- len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
- if (len < 2 || len >= sizeof(ctx->name))
+ len = strscpy(name, cipher_name + 4, sizeof(name));
+ if (len < 2)
goto err_free_inst;
- if (ctx->name[len - 1] != ')')
+ if (name[len - 1] != ')')
goto err_free_inst;
- ctx->name[len - 1] = 0;
+ name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+ "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
goto err_free_inst;
}
} else
goto err_free_inst;
+ err = crypto_grab_cipher(&ctx->tweak_spawn,
+ skcipher_crypto_instance(inst), name, 0, mask);
+ if (err)
+ goto err_free_inst;
+
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
(__alignof__(u64) - 1);
inst->alg.ivsize = XTS_BLOCK_SIZE;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
+ inst->alg.min_keysize = alg->min_keysize * 2;
+ inst->alg.max_keysize = alg->max_keysize * 2;
inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
@@ -459,11 +466,11 @@ static void __exit xts_module_exit(void)
crypto_unregister_template(&xts_tmpl);
}
-subsys_initcall(xts_module_init);
+module_init(xts_module_init);
module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
MODULE_SOFTDEP("pre: ecb");
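A userspace sketch (not kernel code) of the name mangling performed in xts_create() above: a request for "xts(ecb(aes))" keeps cra_name "xts(aes)" and hands the inner name "aes" to the tweak spawn. strncmp stands in for the kernel's memcmp prefix check:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cipher_name = "ecb(aes)"; /* name the skcipher spawn matched */
	char name[64], cra_name[64];
	size_t len;

	if (strncmp(cipher_name, "ecb(", 4) != 0)
		return 1;
	strcpy(name, cipher_name + 4);        /* "aes)" */
	len = strlen(name);
	if (len < 2 || name[len - 1] != ')')
		return 1;
	name[len - 1] = '\0';                 /* "aes": the tweak cipher */
	snprintf(cra_name, sizeof(cra_name), "xts(%s)", name);
	printf("cra_name=%s tweak=%s\n", cra_name, name); /* xts(aes) aes */
	return 0;
}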
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c
index 55d1c8a76127..175bb7ae0fcd 100644
--- a/crypto/xxhash_generic.c
+++ b/crypto/xxhash_generic.c
@@ -4,7 +4,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/xxhash.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define XXHASH64_BLOCK_SIZE 32
#define XXHASH64_DIGEST_SIZE 8
@@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(xxhash_mod_init);
+module_init(xxhash_mod_init);
module_exit(xxhash_mod_fini);
MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 154a969c83a8..cbbd0413751a 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -10,247 +10,304 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
+#include <linux/overflow.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
-#include <crypto/internal/scompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
-#define ZSTD_DEF_LEVEL 3
+#define ZSTD_DEF_LEVEL 3
+#define ZSTD_MAX_WINDOWLOG 18
+#define ZSTD_MAX_SIZE BIT(ZSTD_MAX_WINDOWLOG)
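+ /* ZSTD_MAX_SIZE (1 << 18 = 256 KiB) is both the source-size hint fed
+ * to zstd_get_params() and the decompression window bound, so a single
+ * workspace sized for the worse of the two directions serves both.
+ */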
struct zstd_ctx {
zstd_cctx *cctx;
zstd_dctx *dctx;
- void *cwksp;
- void *dwksp;
+ size_t wksp_size;
+ zstd_parameters params;
+ u8 wksp[] __aligned(8) __counted_by(wksp_size);
};
-static zstd_parameters zstd_params(void)
-{
- return zstd_get_params(ZSTD_DEF_LEVEL, 0);
-}
-
-static int zstd_comp_init(struct zstd_ctx *ctx)
-{
- int ret = 0;
- const zstd_parameters params = zstd_params();
- const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
-
- ctx->cwksp = vzalloc(wksp_size);
- if (!ctx->cwksp) {
- ret = -ENOMEM;
- goto out;
- }
-
- ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
- if (!ctx->cctx) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(ctx->cwksp);
- goto out;
-}
+static DEFINE_MUTEX(zstd_stream_lock);
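+ /* Serialises crypto_acomp_alloc_streams() when several tfms are
+ * initialised concurrently.
+ */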
-static int zstd_decomp_init(struct zstd_ctx *ctx)
+static void *zstd_alloc_stream(void)
{
- int ret = 0;
- const size_t wksp_size = zstd_dctx_workspace_bound();
-
- ctx->dwksp = vzalloc(wksp_size);
- if (!ctx->dwksp) {
- ret = -ENOMEM;
- goto out;
- }
-
- ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
- if (!ctx->dctx) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(ctx->dwksp);
- goto out;
-}
+ zstd_parameters params;
+ struct zstd_ctx *ctx;
+ size_t wksp_size;
-static void zstd_comp_exit(struct zstd_ctx *ctx)
-{
- vfree(ctx->cwksp);
- ctx->cwksp = NULL;
- ctx->cctx = NULL;
-}
+ params = zstd_get_params(ZSTD_DEF_LEVEL, ZSTD_MAX_SIZE);
-static void zstd_decomp_exit(struct zstd_ctx *ctx)
-{
- vfree(ctx->dwksp);
- ctx->dwksp = NULL;
- ctx->dctx = NULL;
-}
-
-static int __zstd_init(void *ctx)
-{
- int ret;
-
- ret = zstd_comp_init(ctx);
- if (ret)
- return ret;
- ret = zstd_decomp_init(ctx);
- if (ret)
- zstd_comp_exit(ctx);
- return ret;
-}
+ wksp_size = max(zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
+ if (!wksp_size)
+ return ERR_PTR(-EINVAL);
-static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
-{
- int ret;
- struct zstd_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kvmalloc(struct_size(ctx, wksp, wksp_size), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
- ret = __zstd_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ ctx->params = params;
+ ctx->wksp_size = wksp_size;
return ctx;
}
-static int zstd_init(struct crypto_tfm *tfm)
+static void zstd_free_stream(void *ctx)
{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_init(ctx);
+ kvfree(ctx);
}
-static void __zstd_exit(void *ctx)
-{
- zstd_comp_exit(ctx);
- zstd_decomp_exit(ctx);
-}
+static struct crypto_acomp_streams zstd_streams = {
+ .alloc_ctx = zstd_alloc_stream,
+ .free_ctx = zstd_free_stream,
+};
-static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static int zstd_init(struct crypto_acomp *acomp_tfm)
{
- __zstd_exit(ctx);
- kfree_sensitive(ctx);
-}
+ int ret = 0;
-static void zstd_exit(struct crypto_tfm *tfm)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+ mutex_lock(&zstd_stream_lock);
+ ret = crypto_acomp_alloc_streams(&zstd_streams);
+ mutex_unlock(&zstd_stream_lock);
- __zstd_exit(ctx);
+ return ret;
}
-static int __zstd_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int zstd_compress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+ const void *src, void *dst, unsigned int *dlen)
{
size_t out_len;
- struct zstd_ctx *zctx = ctx;
- const zstd_parameters params = zstd_params();
- out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
+ ctx->cctx = zstd_init_cctx(ctx->wksp, ctx->wksp_size);
+ if (!ctx->cctx)
+ return -EINVAL;
+
+ out_len = zstd_compress_cctx(ctx->cctx, dst, req->dlen, src, req->slen,
+ &ctx->params);
if (zstd_is_error(out_len))
return -EINVAL;
+
*dlen = out_len;
+
return 0;
}
-static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int zstd_compress(struct acomp_req *req)
{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_acomp_stream *s;
+ unsigned int pos, scur, dcur;
+ unsigned int total_out = 0;
+ bool data_available = true;
+ zstd_out_buffer outbuf;
+ struct acomp_walk walk;
+ zstd_in_buffer inbuf;
+ struct zstd_ctx *ctx;
+ size_t pending_bytes;
+ size_t num_bytes;
+ int ret;
- return __zstd_compress(src, slen, dst, dlen, ctx);
-}
+ s = crypto_acomp_lock_stream_bh(&zstd_streams);
+ ctx = s->ctx;
-static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __zstd_compress(src, slen, dst, dlen, ctx);
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ goto out;
+
+ ctx->cctx = zstd_init_cstream(&ctx->params, 0, ctx->wksp, ctx->wksp_size);
+ if (!ctx->cctx) {
+ ret = -EINVAL;
+ goto out;
+ }
+
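+ /*
+ * Outer loop: obtain one destination chunk per pass; inner loop
+ * feeds source chunks into the stream until that chunk is full.
+ */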
+ do {
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ outbuf.pos = 0;
+ outbuf.dst = (u8 *)walk.dst.virt.addr;
+ outbuf.size = dcur;
+
+ do {
+ scur = acomp_walk_next_src(&walk);
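+ /* Whole source and destination each fit in a single walk chunk:
+ * use the cheaper one-shot cctx path instead of streaming.
+ */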
+ if (dcur == req->dlen && scur == req->slen) {
+ ret = zstd_compress_one(req, ctx, walk.src.virt.addr,
+ walk.dst.virt.addr, &total_out);
+ acomp_walk_done_src(&walk, scur);
+ acomp_walk_done_dst(&walk, dcur);
+ goto out;
+ }
+
+ if (scur) {
+ inbuf.pos = 0;
+ inbuf.src = walk.src.virt.addr;
+ inbuf.size = scur;
+ } else {
+ data_available = false;
+ break;
+ }
+
+ num_bytes = zstd_compress_stream(ctx->cctx, &outbuf, &inbuf);
+ if (zstd_is_error(num_bytes)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pending_bytes = zstd_flush_stream(ctx->cctx, &outbuf);
+ if (zstd_is_error(pending_bytes)) {
+ ret = -EIO;
+ goto out;
+ }
+ acomp_walk_done_src(&walk, inbuf.pos);
+ } while (dcur != outbuf.pos);
+
+ total_out += outbuf.pos;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (data_available);
+
+ pos = outbuf.pos;
+ num_bytes = zstd_end_stream(ctx->cctx, &outbuf);
+ if (zstd_is_error(num_bytes))
+ ret = -EIO;
+ else
+ total_out += (outbuf.pos - pos);
+
+out:
+ if (ret)
+ req->dlen = 0;
+ else
+ req->dlen = total_out;
+
+ crypto_acomp_unlock_stream_bh(s);
+
+ return ret;
}
-static int __zstd_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int zstd_decompress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+ const void *src, void *dst, unsigned int *dlen)
{
size_t out_len;
- struct zstd_ctx *zctx = ctx;
- out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
+ ctx->dctx = zstd_init_dctx(ctx->wksp, ctx->wksp_size);
+ if (!ctx->dctx)
+ return -EINVAL;
+
+ out_len = zstd_decompress_dctx(ctx->dctx, dst, req->dlen, src, req->slen);
if (zstd_is_error(out_len))
return -EINVAL;
+
*dlen = out_len;
+
return 0;
}
-static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int zstd_decompress(struct acomp_req *req)
{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
+ struct crypto_acomp_stream *s;
+ unsigned int total_out = 0;
+ unsigned int scur, dcur;
+ zstd_out_buffer outbuf;
+ struct acomp_walk walk;
+ zstd_in_buffer inbuf;
+ struct zstd_ctx *ctx;
+ size_t pending_bytes;
+ int ret;
-static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
+ s = crypto_acomp_lock_stream_bh(&zstd_streams);
+ ctx = s->ctx;
-static struct crypto_alg alg = {
- .cra_name = "zstd",
- .cra_driver_name = "zstd-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zstd_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = zstd_init,
- .cra_exit = zstd_exit,
- .cra_u = { .compress = {
- .coa_compress = zstd_compress,
- .coa_decompress = zstd_decompress } }
-};
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ goto out;
-static struct scomp_alg scomp = {
- .alloc_ctx = zstd_alloc_ctx,
- .free_ctx = zstd_free_ctx,
- .compress = zstd_scompress,
- .decompress = zstd_sdecompress,
- .base = {
- .cra_name = "zstd",
- .cra_driver_name = "zstd-scomp",
- .cra_module = THIS_MODULE,
+ ctx->dctx = zstd_init_dstream(ZSTD_MAX_SIZE, ctx->wksp, ctx->wksp_size);
+ if (!ctx->dctx) {
+ ret = -EINVAL;
+ goto out;
}
-};
-static int __init zstd_mod_init(void)
-{
- int ret;
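+ /* Mirror of the compress walk: take one source chunk per outer
+ * pass and drain it into successive destination chunks.
+ */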
+ do {
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ inbuf.pos = 0;
+ inbuf.size = scur;
+ inbuf.src = walk.src.virt.addr;
+ } else {
+ break;
+ }
+
+ do {
+ dcur = acomp_walk_next_dst(&walk);
+ if (dcur == req->dlen && scur == req->slen) {
+ ret = zstd_decompress_one(req, ctx, walk.src.virt.addr,
+ walk.dst.virt.addr, &total_out);
+ acomp_walk_done_dst(&walk, dcur);
+ acomp_walk_done_src(&walk, scur);
+ goto out;
+ }
+
+ if (!dcur) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ outbuf.pos = 0;
+ outbuf.dst = (u8 *)walk.dst.virt.addr;
+ outbuf.size = dcur;
+
+ pending_bytes = zstd_decompress_stream(ctx->dctx, &outbuf, &inbuf);
+ if (zstd_is_error(pending_bytes)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ total_out += outbuf.pos;
+
+ acomp_walk_done_dst(&walk, outbuf.pos);
+ } while (inbuf.pos != scur);
+
+ acomp_walk_done_src(&walk, scur);
+ } while (ret == 0);
- ret = crypto_register_alg(&alg);
+out:
if (ret)
- return ret;
+ req->dlen = 0;
+ else
+ req->dlen = total_out;
- ret = crypto_register_scomp(&scomp);
- if (ret)
- crypto_unregister_alg(&alg);
+ crypto_acomp_unlock_stream_bh(s);
return ret;
}
+static struct acomp_alg zstd_acomp = {
+ .base = {
+ .cra_name = "zstd",
+ .cra_driver_name = "zstd-generic",
+ .cra_flags = CRYPTO_ALG_REQ_VIRT,
+ .cra_module = THIS_MODULE,
+ },
+ .init = zstd_init,
+ .compress = zstd_compress,
+ .decompress = zstd_decompress,
+};
+
+static int __init zstd_mod_init(void)
+{
+ return crypto_register_acomp(&zstd_acomp);
+}
+
static void __exit zstd_mod_fini(void)
{
- crypto_unregister_alg(&alg);
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&zstd_acomp);
+ crypto_acomp_free_streams(&zstd_streams);
}
-subsys_initcall(zstd_mod_init);
+module_init(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
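For reference, a hedged sketch of how an in-kernel caller might drive the converted "zstd" algorithm through the acomp API. zstd_acomp_demo is hypothetical; buffers are assumed to be kmalloc'd and linearly mapped, and error handling is abbreviated:

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int zstd_acomp_demo(const u8 *src, unsigned int slen,
			   u8 *dst, unsigned int dlen)
{
	struct scatterlist sg_in, sg_out;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("zstd", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_in, src, slen);
	sg_init_one(&sg_out, dst, dlen);
	acomp_request_set_params(req, &sg_in, &sg_out, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* On success, req->dlen holds the number of bytes produced. */
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}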