commit 733f7e9c18c5e377025c1bfdce6bc9a7d55649be
tree 19adc4c70522756ef682181d58b231005fed5a32
parent 98f99e67a1dc456e9a542584819b2aa265ffc737
parent 482c84e906e535072c55395acabd3a58e9443d12
author Linus Torvalds <torvalds@linux-foundation.org> 2023-04-26 08:32:52 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2023-04-26 08:32:52 -0700
Merge tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Total usage stats now include all that returned errors (instead of
     just some)
   - Remove maximum hash statesize limit
   - Add cloning support for hmac and unkeyed hashes
   - Demote BUG_ON in crypto_unregister_alg to a WARN_ON

  Algorithms:
   - Use RIP-relative addressing on x86 to prepare for PIE build
   - Add accelerated AES/GCM stitched implementation on powerpc P10
   - Add some test vectors for cmac(camellia)
   - Remove failure case where jent is unavailable outside of FIPS mode
     in drbg
   - Add permanent and intermittent health error checks in jitter RNG

  Drivers:
   - Add support for 402xx devices in qat
   - Add support for HiSTB TRNG
   - Fix hash concurrency issues in stm32
   - Add OP-TEE firmware support in caam"

* tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (139 commits)
  i2c: designware: Add doorbell support for Mendocino
  i2c: designware: Use PCI PSP driver for communication
  powerpc: Move Power10 feature PPC_MODULE_FEATURE_P10
  crypto: p10-aes-gcm - Remove POWER10_CPU dependency
  crypto: testmgr - Add some test vectors for cmac(camellia)
  crypto: cryptd - Add support for cloning hashes
  crypto: cryptd - Convert hash to use modern init_tfm/exit_tfm
  crypto: hmac - Add support for cloning
  crypto: hash - Add crypto_clone_ahash/shash
  crypto: api - Add crypto_clone_tfm
  crypto: api - Add crypto_tfm_get
  crypto: x86/sha - Use local .L symbols for code
  crypto: x86/crc32 - Use local .L symbols for code
  crypto: x86/aesni - Use local .L symbols for code
  crypto: x86/sha256 - Use RIP-relative addressing
  crypto: x86/ghash - Use RIP-relative addressing
  crypto: x86/des3 - Use RIP-relative addressing
  crypto: x86/crc32c - Use RIP-relative addressing
  crypto: x86/cast6 - Use RIP-relative addressing
  crypto: x86/cast5 - Use RIP-relative addressing
  ...
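The cloning work called out above adds crypto_clone_ahash()/crypto_clone_shash() (see the crypto/ahash.c and crypto/hmac.c hunks below). A minimal sketch of how a caller might use the new API — illustrative only; the helper name is hypothetical, not part of this merge:

#include <crypto/hash.h>
#include <linux/err.h>

/* Hypothetical example; crypto_clone_ahash() itself is from this merge. */
static int clone_hmac_example(void)
{
	struct crypto_ahash *tfm, *clone;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * Duplicate the transform, carrying over any key already set;
	 * for an unkeyed hash this merely takes another reference.
	 */
	clone = crypto_clone_ahash(tfm);
	if (IS_ERR(clone)) {
		crypto_free_ahash(tfm);
		return PTR_ERR(clone);
	}

	crypto_free_ahash(clone);
	crypto_free_ahash(tfm);
	return 0;
}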
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/acompress.c             81
-rw-r--r--  crypto/aead.c                  98
-rw-r--r--  crypto/ahash.c                144
-rw-r--r--  crypto/akcipher.c              52
-rw-r--r--  crypto/algapi.c               219
-rw-r--r--  crypto/algif_hash.c            19
-rw-r--r--  crypto/api.c                   63
-rw-r--r--  crypto/async_tx/async_pq.c     10
-rw-r--r--  crypto/async_tx/async_tx.c      4
-rw-r--r--  crypto/compress.h              26
-rw-r--r--  crypto/cryptd.c                34
-rw-r--r--  crypto/crypto_user_stat.c     183
-rw-r--r--  crypto/drbg.c                   2
-rw-r--r--  crypto/fips.c                  11
-rw-r--r--  crypto/hash.h                  40
-rw-r--r--  crypto/hmac.c                  15
-rw-r--r--  crypto/internal.h              10
-rw-r--r--  crypto/jitterentropy-kcapi.c   51
-rw-r--r--  crypto/jitterentropy.c        144
-rw-r--r--  crypto/jitterentropy.h          1
-rw-r--r--  crypto/kpp.c                   53
-rw-r--r--  crypto/rng.c                   65
-rw-r--r--  crypto/scompress.c             39
-rw-r--r--  crypto/shash.c                181
-rw-r--r--  crypto/skcipher.c             113
-rw-r--r--  crypto/tcrypt.c                11
-rw-r--r--  crypto/testmgr.c              272
-rw-r--r--  crypto/testmgr.h               47
28 files changed, 1137 insertions, 851 deletions
diff --git a/crypto/acompress.c b/crypto/acompress.c
index c32c72048a1c..82a290df2822 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -6,25 +6,35 @@
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
+
+#include <crypto/internal/acompress.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
+
+struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
-#ifdef CONFIG_NET
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct acomp_alg, calg.base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static int __maybe_unused crypto_acomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
@@ -34,12 +44,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
-#else
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -89,13 +93,44 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
+static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
+ struct crypto_istat_compress *istat = comp_get_stat(calg);
+ struct crypto_stat_compress racomp;
+
+ memset(&racomp, 0, sizeof(racomp));
+
+ strscpy(racomp.type, "acomp", sizeof(racomp.type));
+ racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
+ racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
+ racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
+ racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
+ racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
+}
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return __crypto_acomp_report_stat(skb, alg);
+}
+#endif
+
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_acomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
@@ -147,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
}
EXPORT_SYMBOL_GPL(acomp_request_free);
-int crypto_register_acomp(struct acomp_alg *alg)
+void comp_prepare_alg(struct comp_alg_common *alg)
{
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
- base->cra_type = &crypto_acomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
+
+ base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
diff --git a/crypto/aead.c b/crypto/aead.c
index 16991095270d..ffc48a7dfb34 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -8,17 +8,27 @@
*/
#include <crypto/internal/aead.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -80,39 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
int ret;
- crypto_stats_get(alg);
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stats_aead_encrypt(cryptlen, alg, ret);
- return ret;
+ ret = alg->encrypt(req);
+
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
int ret;
- crypto_stats_get(alg);
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
+
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
- ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stats_aead_decrypt(cryptlen, alg, ret);
- return ret;
+ ret = alg->decrypt(req);
+
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@@ -142,8 +175,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-#ifdef CONFIG_NET
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_aead_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
@@ -159,12 +192,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
-#else
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -188,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
aead->free(aead);
}
+static int __maybe_unused crypto_aead_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+ struct crypto_istat_aead *istat = aead_get_stat(aead);
+ struct crypto_stat_aead raead;
+
+ memset(&raead, 0, sizeof(raead));
+
+ strscpy(raead.type, "aead", sizeof(raead.type));
+
+ raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
@@ -195,7 +242,12 @@ static const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_aead_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_aead_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
@@ -219,6 +271,7 @@ EXPORT_SYMBOL_GPL(crypto_alloc_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
+ struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -232,6 +285,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ff8c79d975c1..b8a607928e72 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -8,19 +8,18 @@
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/string.h>
#include <net/netlink.h>
-#include "internal.h"
+#include "hash.h"
static const struct crypto_type crypto_ahash_type;
@@ -296,55 +295,60 @@ static int crypto_ahash_op(struct ahash_request *req,
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)req->result & alignmask)
- return ahash_op_unaligned(req, op, has_state);
+ err = ahash_op_unaligned(req, op, has_state);
+ else
+ err = op(req);
- return op(req);
+ return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&hash_get_stat(alg)->hash_cnt);
+
+ return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
+
+ return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
- crypto_stats_get(alg);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_ahash_op(req, tfm->digest, false);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ return crypto_hash_errstat(alg, -ENOKEY);
+
+ return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -465,8 +469,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst)
ahash->free(ahash);
}
-#ifdef CONFIG_NET
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_ahash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
@@ -479,12 +483,6 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -498,6 +496,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
+static int __maybe_unused crypto_ahash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "ahash");
+}
+
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
@@ -505,7 +509,12 @@ static const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_ahash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_ahash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
@@ -534,17 +543,70 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
+{
+ struct hash_alg_common *halg = crypto_hash_alg_common(hash);
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *nhash;
+ struct ahash_alg *alg;
+ int err;
+
+ if (!crypto_hash_alg_has_setkey(halg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
+
+ if (IS_ERR(nhash))
+ return nhash;
+
+ nhash->init = hash->init;
+ nhash->update = hash->update;
+ nhash->final = hash->final;
+ nhash->finup = hash->finup;
+ nhash->digest = hash->digest;
+ nhash->export = hash->export;
+ nhash->import = hash->import;
+ nhash->setkey = hash->setkey;
+ nhash->reqsize = hash->reqsize;
+
+ if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+ return crypto_clone_shash_ops_async(nhash, hash);
+
+ err = -ENOSYS;
+ alg = crypto_ahash_alg(hash);
+ if (!alg->clone_tfm)
+ goto out_free_nhash;
+
+ err = alg->clone_tfm(nhash, hash);
+ if (err)
+ goto out_free_nhash;
+
+ return nhash;
+
+out_free_nhash:
+ crypto_free_ahash(nhash);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
+ int err;
- if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
- alg->halg.statesize > HASH_MAX_STATESIZE ||
- alg->halg.statesize == 0)
+ if (alg->halg.statesize == 0)
return -EINVAL;
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_ahash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index ab975a420e1e..186e762b509a 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -5,23 +5,20 @@
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <tadeusz.struk@intel.com>
*/
+#include <crypto/internal/akcipher.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
#include <net/netlink.h>
-#include <crypto/akcipher.h>
-#include <crypto/internal/akcipher.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_akcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -32,12 +29,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
-#else
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -76,6 +67,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
akcipher->free(akcipher);
}
+static int __maybe_unused crypto_akcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
+ struct crypto_istat_akcipher *istat;
+ struct crypto_stat_akcipher rakcipher;
+
+ istat = akcipher_get_stat(akcipher);
+
+ memset(&rakcipher, 0, sizeof(rakcipher));
+
+ strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
+ rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
+ rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(rakcipher), &rakcipher);
+}
+
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
@@ -83,7 +98,12 @@ static const struct crypto_type crypto_akcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_akcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_akcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
@@ -108,11 +128,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d08f864f08be..d7eb8f9e9883 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -339,8 +339,6 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
list_add(&alg->cra_list, &crypto_alg_list);
- crypto_stats_init(alg);
-
if (larval) {
/* No cheating! */
alg->cra_flags &= ~CRYPTO_ALG_TESTED;
@@ -493,7 +491,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+ if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+ return;
+
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -1038,219 +1038,6 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg)
-{
- memset(&alg->stats, 0, sizeof(alg->stats));
-}
-EXPORT_SYMBOL_GPL(crypto_stats_init);
-
-void crypto_stats_get(struct crypto_alg *alg)
-{
- crypto_alg_get(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_get);
-
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
-
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
-
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
-
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
-
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.sign_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
-
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.verify_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
-
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.compress_cnt);
- atomic64_add(slen, &alg->stats.compress.compress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_compress);
-
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.decompress_cnt);
- atomic64_add(slen, &alg->stats.compress.decompress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_decompress);
-
-void crypto_stats_ahash_update(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.hash.err_cnt);
- else
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
-
-void crypto_stats_ahash_final(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.hash.err_cnt);
- } else {
- atomic64_inc(&alg->stats.hash.hash_cnt);
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
-
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.setsecret_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
-
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
-
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{
- if (ret)
- atomic64_inc(&alg->stats.kpp.err_cnt);
- else
- atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
-
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.rng.err_cnt);
- else
- atomic64_inc(&alg->stats.rng.seed_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
-
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.rng.err_cnt);
- } else {
- atomic64_inc(&alg->stats.rng.generate_cnt);
- atomic64_add(dlen, &alg->stats.rng.generate_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
-
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
-
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
-#endif
-
static void __init crypto_start_tests(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 1d017ec5c63c..63af72e19fa8 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -235,24 +235,31 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[HASH_MAX_STATESIZE];
+ struct crypto_ahash *tfm;
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
+ char *state;
bool more;
int err;
+ tfm = crypto_ahash_reqtfm(req);
+ state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!state)
+ goto out;
+
lock_sock(sk);
more = ctx->more;
err = more ? crypto_ahash_export(req, state) : 0;
release_sock(sk);
if (err)
- return err;
+ goto out_free_state;
err = af_alg_accept(ask->parent, newsock, kern);
if (err)
- return err;
+ goto out_free_state;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
@@ -260,7 +267,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
ctx2->more = more;
if (!more)
- return err;
+ goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
@@ -268,6 +275,10 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
sock_put(sk2);
}
+out_free_state:
+ kfree_sensitive(state);
+
+out:
return err;
}
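The hash_accept() rework above is a consequence of lifting the HASH_MAX_STATESIZE cap: export state is now sized per transform at runtime instead of a fixed on-stack buffer. A minimal sketch of the same pattern — illustrative only; the helper name is hypothetical:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Hypothetical example of runtime-sized export/import state. */
static int save_restore_state(struct ahash_request *req,
			      struct ahash_request *req2)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	char *state;
	int err;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(req, state);
	if (!err)
		err = crypto_ahash_import(req2, state);

	kfree_sensitive(state);	/* exported state may hold secrets */
	return err;
}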
diff --git a/crypto/api.c b/crypto/api.c
index e67cc63368ed..d375e8cd770d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -408,6 +408,7 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
goto out_err;
tfm->__crt_alg = alg;
+ refcount_set(&tfm->refcnt, 1);
err = crypto_init_ops(tfm, type, mask);
if (err)
@@ -487,26 +488,43 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
-void *crypto_create_tfm_node(struct crypto_alg *alg,
- const struct crypto_type *frontend,
- int node)
+static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
+ const struct crypto_type *frontend, int node,
+ gfp_t gfp)
{
- char *mem;
- struct crypto_tfm *tfm = NULL;
+ struct crypto_tfm *tfm;
unsigned int tfmsize;
unsigned int total;
- int err = -ENOMEM;
+ char *mem;
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
- mem = kzalloc_node(total, GFP_KERNEL, node);
+ mem = kzalloc_node(total, gfp, node);
if (mem == NULL)
- goto out_err;
+ return ERR_PTR(-ENOMEM);
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
tfm->node = node;
+ refcount_set(&tfm->refcnt, 1);
+
+ return mem;
+}
+
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend,
+ int node)
+{
+ struct crypto_tfm *tfm;
+ char *mem;
+ int err;
+
+ mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
+ if (IS_ERR(mem))
+ goto out;
+
+ tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
err = frontend->init_tfm(tfm);
if (err)
@@ -523,13 +541,38 @@ out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(mem);
-out_err:
mem = ERR_PTR(err);
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+ struct crypto_tfm *otfm)
+{
+ struct crypto_alg *alg = otfm->__crt_alg;
+ struct crypto_tfm *tfm;
+ char *mem;
+
+ mem = ERR_PTR(-ESTALE);
+ if (unlikely(!crypto_mod_get(alg)))
+ goto out;
+
+ mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
+ if (IS_ERR(mem)) {
+ crypto_mod_put(alg);
+ goto out;
+ }
+
+ tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->crt_flags = otfm->crt_flags;
+ tfm->exit = otfm->exit;
+
+out:
+ return mem;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_tfm);
+
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
@@ -619,6 +662,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
if (IS_ERR_OR_NULL(mem))
return;
+ if (!refcount_dec_and_test(&tfm->refcnt))
+ return;
alg = tfm->__crt_alg;
if (!tfm->exit && alg->cra_exit)
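With the refcount introduced above, crypto_destroy_tfm() becomes a put operation: each call drops one reference, and teardown (the exit hook and kfree) runs only when the count reaches zero. A sketch of taking an extra reference via the internal helper added to crypto/internal.h later in this diff — illustrative only; the wrapper name is hypothetical:

static struct crypto_tfm *tfm_get_example(struct crypto_tfm *tfm)
{
	/*
	 * Hypothetical illustration: bump the refcount so a later
	 * crypto_destroy_tfm() drops the count instead of freeing.
	 * crypto_tfm_get() errors only if the count already hit zero.
	 */
	return crypto_tfm_get(tfm);
}

This is what lets the unkeyed-hash clone path in crypto/ahash.c return the original tfm rather than allocating a new one.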
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f9cdc5e91664..5e2b2680d7db 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -11,8 +11,8 @@
#include <linux/async_tx.h>
#include <linux/gfp.h>
-/**
- * pq_scribble_page - space to hold throwaway P or Q buffer for
+/*
+ * struct pq_scribble_page - space to hold throwaway P or Q buffer for
* synchronous gen_syndrome
*/
static struct page *pq_scribble_page;
@@ -28,7 +28,7 @@ static struct page *pq_scribble_page;
#define MAX_DISKS 255
-/**
+/*
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
@@ -100,7 +100,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
return tx;
}
-/**
+/*
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
@@ -281,7 +281,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 9256934312d7..ad72057a5e0d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -124,7 +124,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
- * submit_disposition - flags for routing an incoming operation
+ * enum submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(async_trigger_callback);
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
+ * @tx: transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
diff --git a/crypto/compress.h b/crypto/compress.h
new file mode 100644
index 000000000000..19f65516d699
--- /dev/null
+++ b/crypto/compress.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_COMPRESS_H
+#define _LOCAL_CRYPTO_COMPRESS_H
+
+#include "internal.h"
+
+struct acomp_req;
+struct comp_alg_common;
+struct sk_buff;
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
+
+void comp_prepare_alg(struct comp_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 37365ed30b38..bbcc368b6a55 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -427,12 +427,12 @@ err_free_inst:
return err;
}
-static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct ahash_instance *inst = ahash_alg_instance(tfm);
+ struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
struct crypto_shash_spawn *spawn = &ictx->spawn;
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_spawn_shash(spawn);
@@ -440,15 +440,30 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
return PTR_ERR(hash);
ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct cryptd_hash_request_ctx) +
crypto_shash_descsize(hash));
return 0;
}
-static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
+ struct crypto_ahash *tfm)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_shash *hash;
+
+ hash = crypto_clone_shash(ctx->child);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ nctx->child = hash;
+ return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
crypto_free_shash(ctx->child);
}
@@ -677,8 +692,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
- inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+ inst->alg.init_tfm = cryptd_hash_init_tfm;
+ inst->alg.clone_tfm = cryptd_hash_clone_tfm;
+ inst->alg.exit_tfm = cryptd_hash_exit_tfm;
inst->alg.init = cryptd_hash_init_enqueue;
inst->alg.update = cryptd_hash_update_enqueue;
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 154884bf9275..d4f3d39b5137 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -6,18 +6,14 @@
*
*/
-#include <linux/crypto.h>
-#include <linux/cryptouser.h>
-#include <linux/sched.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/cryptouser.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include <net/sock.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/rng.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/cryptouser.h>
-
-#include "internal.h"
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
@@ -28,23 +24,6 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_aead raead;
-
- memset(&raead, 0, sizeof(raead));
-
- strscpy(raead.type, "aead", sizeof(raead.type));
-
- raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
- raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
- raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
- raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
- raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_cipher rcipher;
@@ -53,12 +32,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
- rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
- rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
- rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
- rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
- rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
-
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
@@ -69,112 +42,10 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
memset(&rcomp, 0, sizeof(rcomp));
strscpy(rcomp.type, "compression", sizeof(rcomp.type));
- rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
- rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
- rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
- rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
}
-static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_compress racomp;
-
- memset(&racomp, 0, sizeof(racomp));
-
- strscpy(racomp.type, "acomp", sizeof(racomp.type));
- racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
- racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
- racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
- racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_akcipher rakcipher;
-
- memset(&rakcipher, 0, sizeof(rakcipher));
-
- strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
- rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
- rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
- rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
- rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
- rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
- rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
- rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
- sizeof(rakcipher), &rakcipher);
-}
-
-static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_kpp rkpp;
-
- memset(&rkpp, 0, sizeof(rkpp));
-
- strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
- rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
- rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
- rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
- rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
-}
-
-static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, "ahash", sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, "shash", sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_rng rrng;
-
- memset(&rrng, 0, sizeof(rrng));
-
- strscpy(rrng.type, "rng", sizeof(rrng.type));
-
- rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
- rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
- rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
- rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
-}
-
static int crypto_reportstat_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg,
struct sk_buff *skb)
@@ -204,15 +75,13 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
goto out;
}
- switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
- case CRYPTO_ALG_TYPE_AEAD:
- if (crypto_report_aead(skb, alg))
+ if (alg->cra_type && alg->cra_type->report_stat) {
+ if (alg->cra_type->report_stat(skb, alg))
goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_SKCIPHER:
- if (crypto_report_cipher(skb, alg))
- goto nla_put_failure;
- break;
+ goto out;
+ }
+
+ switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
@@ -221,34 +90,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_ACOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_SCOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_AKCIPHER:
- if (crypto_report_akcipher(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_KPP:
- if (crypto_report_kpp(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- if (crypto_report_ahash(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_HASH:
- if (crypto_report_shash(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_RNG:
- if (crypto_report_rng(skb, alg))
- goto nla_put_failure;
- break;
default:
pr_err("ERROR: Unhandled alg %d in %s\n",
alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 982d4ca4526d..ff4ebbc68efa 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
const int err = PTR_ERR(drbg->jent);
drbg->jent = NULL;
- if (fips_enabled || err != -ENOENT)
+ if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}
diff --git a/crypto/fips.c b/crypto/fips.c
index b05d3c7b3ca5..92fd506abb21 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -66,20 +66,11 @@ static struct ctl_table crypto_sysctl_table[] = {
{}
};
-static struct ctl_table crypto_dir_table[] = {
- {
- .procname = "crypto",
- .mode = 0555,
- .child = crypto_sysctl_table
- },
- {}
-};
-
static struct ctl_table_header *crypto_sysctls;
static void crypto_proc_fips_init(void)
{
- crypto_sysctls = register_sysctl_table(crypto_dir_table);
+ crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table);
}
static void crypto_proc_fips_exit(void)
diff --git a/crypto/hash.h b/crypto/hash.h
new file mode 100644
index 000000000000..7e6c1a948692
--- /dev/null
+++ b/crypto/hash.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_HASH_H
+#define _LOCAL_CRYPTO_HASH_H
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
+
+#include "internal.h"
+
+static inline int crypto_hash_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg,
+ const char *type)
+{
+ struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
+ struct crypto_istat_hash *istat = hash_get_stat(halg);
+ struct crypto_stat_hash rhash;
+
+ memset(&rhash, 0, sizeof(rhash));
+
+ strscpy(rhash.type, type, sizeof(rhash.type));
+
+ rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
+ rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
+ rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
+}
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
+struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
+ struct crypto_ahash *hash);
+
+int hash_prepare_alg(struct hash_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_HASH_H */
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 3610ff0b6739..09a7872b4060 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -160,6 +160,20 @@ static int hmac_init_tfm(struct crypto_shash *parent)
return 0;
}
+static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
+{
+ struct hmac_ctx *sctx = hmac_ctx(src);
+ struct hmac_ctx *dctx = hmac_ctx(dst);
+ struct crypto_shash *hash;
+
+ hash = crypto_clone_shash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ dctx->hash = hash;
+ return 0;
+}
+
static void hmac_exit_tfm(struct crypto_shash *parent)
{
struct hmac_ctx *ctx = hmac_ctx(parent);
@@ -227,6 +241,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
+ inst->alg.clone_tfm = hmac_clone_tfm;
inst->alg.exit_tfm = hmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
diff --git a/crypto/internal.h b/crypto/internal.h
index 932f0aafddc3..8dd746b1130b 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <linux/completion.h>
+#include <linux/err.h>
#include <linux/jump_label.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -47,6 +48,8 @@ extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
static inline bool crypto_boot_test_finished(void)
{
@@ -103,6 +106,8 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+ struct crypto_tfm *otfm);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -184,5 +189,10 @@ static inline int crypto_is_test_larval(struct crypto_larval *larval)
return larval->alg.cra_driver_name[0];
}
+static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm)
+{
+ return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW);
+}
+
#endif /* _CRYPTO_INTERNAL_H */
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 2d115bec15ae..b9edfaa51b27 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -37,6 +37,7 @@
* DAMAGE.
*/
+#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -59,11 +60,6 @@ void jent_zfree(void *ptr)
kfree_sensitive(ptr);
}
-void jent_panic(char *s)
-{
- panic("%s", s);
-}
-
void jent_memcpy(void *dest, const void *src, unsigned int n)
{
memcpy(dest, src, n);
@@ -102,7 +98,6 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
- unsigned int reset_cnt;
};
static int jent_kcapi_init(struct crypto_tfm *tfm)
@@ -138,32 +133,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
spin_lock(&rng->jent_lock);
- /* Return a permanent error in case we had too many resets in a row. */
- if (rng->reset_cnt > (1<<10)) {
- ret = -EFAULT;
- goto out;
- }
-
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
- /* Reset RNG in case of health failures */
- if (ret < -1) {
- pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
- (ret == -2) ? "Repetition Count Test" :
- "Adaptive Proportion Test");
-
- rng->reset_cnt++;
-
+ if (ret == -3) {
+ /* Handle permanent health test error */
+ /*
+ * If the kernel was booted with fips=1, it implies that
+ * the entire kernel acts as a FIPS 140 module. In this case
+ * an SP800-90B permanent health test error is treated as
+ * a FIPS module error.
+ */
+ if (fips_enabled)
+ panic("Jitter RNG permanent health test failure\n");
+
+ pr_err("Jitter RNG permanent health test failure\n");
+ ret = -EFAULT;
+ } else if (ret == -2) {
+ /* Handle intermittent health test error */
+ pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
ret = -EAGAIN;
- } else {
- rng->reset_cnt = 0;
-
- /* Convert the Jitter RNG error into a usable error code */
- if (ret == -1)
- ret = -EINVAL;
+ } else if (ret == -1) {
+ /* Handle other errors */
+ ret = -EINVAL;
}
-out:
spin_unlock(&rng->jent_lock);
return ret;
@@ -197,6 +190,10 @@ static int __init jent_mod_init(void)
ret = jent_entropy_init();
if (ret) {
+ /* Handle permanent health test error */
+ if (fips_enabled)
+ panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
+
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 93bff3213823..22f48bf4c6f5 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -85,10 +85,14 @@ struct rand_data {
* bit generation */
/* Repetition Count Test */
- int rct_count; /* Number of stuck values */
+ unsigned int rct_count; /* Number of stuck values */
- /* Adaptive Proportion Test for a significance level of 2^-30 */
+ /* Intermittent health test failure threshold of 2^-30 */
+#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
+ /* Permanent health test failure threshold of 2^-60 */
+#define JENT_RCT_CUTOFF_PERMANENT 60
+#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
@@ -97,8 +101,6 @@ struct rand_data {
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
unsigned int apt_base_set:1; /* APT base reference set? */
-
- unsigned int health_failure:1; /* Permanent health failure */
};
/* Flags that can be used to initialize the RNG */
@@ -169,19 +171,26 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
return;
}
- if (delta_masked == ec->apt_base) {
+ if (delta_masked == ec->apt_base)
ec->apt_count++;
- if (ec->apt_count >= JENT_APT_CUTOFF)
- ec->health_failure = 1;
- }
-
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
+/* APT health test failure detection */
+static int jent_apt_permanent_failure(struct rand_data *ec)
+{
+ return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
+}
+
+static int jent_apt_failure(struct rand_data *ec)
+{
+ return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
+}
+
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
@@ -206,55 +215,14 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
- /*
- * If we have a count less than zero, a previous RCT round identified
- * a failure. We will not overwrite it.
- */
- if (ec->rct_count < 0)
- return;
-
if (stuck) {
ec->rct_count++;
-
- /*
- * The cutoff value is based on the following consideration:
- * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
- * In addition, we require an entropy value H of 1/OSR as this
- * is the minimum entropy required to provide full entropy.
- * Note, we collect 64 * OSR deltas for inserting them into
- * the entropy pool which should then have (close to) 64 bits
- * of entropy.
- *
- * Note, ec->rct_count (which equals to value B in the pseudo
- * code of SP800-90B section 4.4.1) starts with zero. Hence
- * we need to subtract one from the cutoff value as calculated
- * following SP800-90B.
- */
- if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
- ec->rct_count = -1;
- ec->health_failure = 1;
- }
} else {
+ /* Reset RCT */
ec->rct_count = 0;
}
}
-/*
- * Is there an RCT health test failure?
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- * 0 No health test failure
- * 1 Permanent health test failure
- */
-static int jent_rct_failure(struct rand_data *ec)
-{
- if (ec->rct_count < 0)
- return 1;
- return 0;
-}
-
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
@@ -303,18 +271,26 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
return 0;
}
-/*
- * Report any health test failures
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- * 0 No health test failure
- * 1 Permanent health test failure
- */
+/* RCT health test failure detection */
+static int jent_rct_permanent_failure(struct rand_data *ec)
+{
+ return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
+}
+
+static int jent_rct_failure(struct rand_data *ec)
+{
+ return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
+}
+
+/* Report any health test failure */
static int jent_health_failure(struct rand_data *ec)
{
- return ec->health_failure;
+ return jent_rct_failure(ec) | jent_apt_failure(ec);
+}
+
+static int jent_permanent_health_failure(struct rand_data *ec)
+{
+ return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
}
/***************************************************************************
@@ -600,8 +576,8 @@ static void jent_gen_entropy(struct rand_data *ec)
*
* The following error codes can occur:
* -1 entropy_collector is NULL
- * -2 RCT failed
- * -3 APT test failed
+ * -2 Intermittent health failure
+ * -3 Permanent health failure
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
@@ -616,39 +592,23 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
jent_gen_entropy(ec);
- if (jent_health_failure(ec)) {
- int ret;
-
- if (jent_rct_failure(ec))
- ret = -2;
- else
- ret = -3;
-
+ if (jent_permanent_health_failure(ec)) {
/*
- * Re-initialize the noise source
- *
- * If the health test fails, the Jitter RNG remains
- * in failure state and will return a health failure
- * during next invocation.
+		 * At this point, the Jitter RNG instance is considered
+		 * a failed instance. The startup test is not rerun,
+		 * because the caller is assumed not to use this
+		 * instance any further.
*/
- if (jent_entropy_init())
- return ret;
-
- /* Set APT to initial state */
- jent_apt_reset(ec, 0);
- ec->apt_base_set = 0;
-
- /* Set RCT to initial state */
- ec->rct_count = 0;
-
- /* Re-enable Jitter RNG */
- ec->health_failure = 0;
-
+ return -3;
+ } else if (jent_health_failure(ec)) {
/*
- * Return the health test failure status to the
- * caller as the generated value is not appropriate.
+		 * Perform the startup health tests and return a
+		 * permanent error if they fail.
*/
- return ret;
+ if (jent_entropy_init())
+ return -3;
+
+ return -2;
}
if ((DATA_SIZE_BITS / 8) < len)
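
A condensed restatement of the detection logic above (not new code): the counters drive two thresholds, intermittent at a 2^-30 false-positive rate and permanent at 2^-60, and the RCT counter now saturates upward instead of latching at -1.

static int jent_health_status(struct rand_data *ec)
{
	if (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT ||
	    ec->apt_count >= JENT_APT_CUTOFF_PERMANENT)
		return -3;	/* permanent failure, alpha = 2^-60 */
	if (ec->rct_count >= JENT_RCT_CUTOFF ||
	    ec->apt_count >= JENT_APT_CUTOFF)
		return -2;	/* intermittent failure, alpha = 2^-30 */
	return 0;
}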
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
index b7397b617ef0..5cc583f6bc6b 100644
--- a/crypto/jitterentropy.h
+++ b/crypto/jitterentropy.h
@@ -2,7 +2,6 @@
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
-extern void jent_panic(char *s);
extern void jent_memcpy(void *dest, const void *src, unsigned int n);
extern void jent_get_nstime(__u64 *out);
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 678e871ce418..74f2e8e918fa 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -5,23 +5,20 @@
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*/
+
+#include <crypto/internal/kpp.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/kpp.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_kpp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
@@ -31,12 +28,6 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
-#else
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -75,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
kpp->free(kpp);
}
+static int __maybe_unused crypto_kpp_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct kpp_alg *kpp = __crypto_kpp_alg(alg);
+ struct crypto_istat_kpp *istat;
+ struct crypto_stat_kpp rkpp;
+
+ istat = kpp_get_stat(kpp);
+
+ memset(&rkpp, 0, sizeof(rkpp));
+
+ strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
+ rkpp.stat_generate_public_key_cnt =
+ atomic64_read(&istat->generate_public_key_cnt);
+ rkpp.stat_compute_shared_secret_cnt =
+ atomic64_read(&istat->compute_shared_secret_cnt);
+ rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
+}
+
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
@@ -82,7 +96,12 @@ static const struct crypto_type crypto_kpp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_kpp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_kpp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
@@ -112,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
static void kpp_prepare_alg(struct kpp_alg *alg)
{
+ struct crypto_istat_kpp *istat = kpp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_kpp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
int crypto_register_kpp(struct kpp_alg *alg)
diff --git a/crypto/rng.c b/crypto/rng.c
index fea082b25fe4..ffde0f64fb25 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -8,17 +8,17 @@
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <linux/atomic.h>
#include <crypto/internal/rng.h>
+#include <linux/atomic.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
#include "internal.h"
@@ -30,27 +30,30 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
u8 *buf = NULL;
int err;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&rng_get_stat(alg)->seed_cnt);
+
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
+ err = -ENOMEM;
if (!buf)
- return -ENOMEM;
+ goto out;
err = get_random_bytes_wait(buf, slen);
if (err)
- goto out;
+ goto free_buf;
seed = buf;
}
- crypto_stats_get(alg);
- err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
- crypto_stats_rng_seed(alg, err);
-out:
+ err = alg->seed(tfm, seed, slen);
+free_buf:
kfree_sensitive(buf);
- return err;
+out:
+ return crypto_rng_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);
@@ -66,8 +69,8 @@ static unsigned int seedsize(struct crypto_alg *alg)
return ralg->seedsize;
}
-#ifdef CONFIG_NET
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_rng_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
@@ -79,12 +82,6 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
-#else
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -94,13 +91,39 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
+static int __maybe_unused crypto_rng_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct rng_alg *rng = __crypto_rng_alg(alg);
+ struct crypto_istat_rng *istat;
+ struct crypto_stat_rng rrng;
+
+ istat = rng_get_stat(rng);
+
+ memset(&rrng, 0, sizeof(rrng));
+
+ strscpy(rrng.type, "rng", sizeof(rrng.type));
+
+ rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
+ rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
+ rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
+ rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
+}
+
static const struct crypto_type crypto_rng_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_rng_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_rng_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_rng_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
@@ -176,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
int crypto_register_rng(struct rng_alg *alg)
{
+ struct crypto_istat_rng *istat = rng_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->seedsize > PAGE_SIZE / 8)
@@ -185,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
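
crypto_rng_errstat() and rng_get_stat() are not visible in this hunk (they live in a header); judging by the analogous crypto_skcipher_errstat() further down in this diff, their shape is presumably along these lines. A sketch, not the actual header code:

static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&rng_get_stat(alg)->err_cnt);

	return err;
}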
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 738f4f8f0f41..24138b42a648 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -6,23 +6,22 @@
* Copyright (c) 2016, Intel Corporation
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <linux/errno.h>
+
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
#include <linux/vmalloc.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
#include <net/netlink.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
struct scomp_scratch {
spinlock_t lock;
@@ -38,8 +37,8 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
-#ifdef CONFIG_NET
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_scomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rscomp;
@@ -50,12 +49,6 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(rscomp), &rscomp);
}
-#else
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -247,7 +240,12 @@ static const struct crypto_type crypto_scomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_scomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
@@ -256,10 +254,11 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg)
{
- struct crypto_alg *base = &alg->base;
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_scomp_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
return crypto_register_alg(base);
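
comp_prepare_alg() comes from the new shared compress.h and is not shown in this diff; a sketch of its presumed shape, inferred from the cra_flags line that crypto_register_scomp() stops doing itself and from the stat zeroing the other types gained. The type and helper names here are assumptions:

static inline void comp_prepare_alg(struct comp_alg_common *calg)
{
	struct crypto_alg *base = &calg->base;

	/* assumed: zero the stats as kpp/rng/skcipher registration does */
	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(comp_get_stat(calg), 0, sizeof(struct crypto_istat_compress));

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}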
diff --git a/crypto/shash.c b/crypto/shash.c
index 58b46f198449..5845b7d59b2f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -6,22 +6,31 @@
*/
#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
#include <net/netlink.h>
-#include <linux/compiler.h>
-#include "internal.h"
+#include "hash.h"
#define MAX_SHASH_ALIGNMASK 63
static const struct crypto_type crypto_shash_type;
+static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
+{
+ return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
+{
+ return crypto_hash_errstat(&alg->halg, err);
+}
+
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -114,11 +123,17 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
if ((unsigned long)data & alignmask)
- return shash_update_unaligned(desc, data, len);
+ err = shash_update_unaligned(desc, data, len);
+ else
+ err = shash->update(desc, data, len);
- return shash->update(desc, data, len);
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
@@ -155,19 +170,25 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out)
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&shash_get_stat(shash)->hash_cnt);
if ((unsigned long)out & alignmask)
- return shash_final_unaligned(desc, out);
+ err = shash_final_unaligned(desc, out);
+ else
+ err = shash->final(desc, out);
- return shash->final(desc, out);
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
+ return shash_update_unaligned(desc, data, len) ?:
+ shash_final_unaligned(desc, out);
}
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
@@ -176,11 +197,22 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(len, &istat->hash_tlen);
+ }
if (((unsigned long)data | (unsigned long)out) & alignmask)
- return shash_finup_unaligned(desc, data, len, out);
+ err = shash_finup_unaligned(desc, data, len, out);
+ else
+ err = shash->finup(desc, data, len, out);
- return shash->finup(desc, data, len, out);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
@@ -188,7 +220,8 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
- crypto_shash_finup(desc, data, len, out);
+ shash_update_unaligned(desc, data, len) ?:
+ shash_final_unaligned(desc, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -197,14 +230,23 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = shash_get_stat(shash);
- if (((unsigned long)data | (unsigned long)out) & alignmask)
- return shash_digest_unaligned(desc, data, len, out);
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(len, &istat->hash_tlen);
+ }
- return shash->digest(desc, data, len, out);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ err = -ENOKEY;
+ else if (((unsigned long)data | (unsigned long)out) & alignmask)
+ err = shash_digest_unaligned(desc, data, len, out);
+ else
+ err = shash->digest(desc, data, len, out);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -403,6 +445,24 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
return 0;
}
+struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
+ struct crypto_ahash *hash)
+{
+ struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
+ struct crypto_shash **ctx = crypto_ahash_ctx(hash);
+ struct crypto_shash *shash;
+
+ shash = crypto_clone_shash(*ctx);
+ if (IS_ERR(shash)) {
+ crypto_free_ahash(nhash);
+ return ERR_CAST(shash);
+ }
+
+ *nctx = shash;
+
+ return nhash;
+}
+
static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -448,8 +508,8 @@ static void crypto_shash_free_instance(struct crypto_instance *inst)
shash->free(shash);
}
-#ifdef CONFIG_NET
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_shash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
@@ -463,12 +523,6 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -481,6 +535,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
+static int __maybe_unused crypto_shash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "shash");
+}
+
static const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
@@ -488,7 +548,12 @@ static const struct crypto_type crypto_shash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_shash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_shash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
@@ -517,13 +582,62 @@ int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_shash);
-static int shash_prepare_alg(struct shash_alg *alg)
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
+{
+ struct crypto_tfm *tfm = crypto_shash_tfm(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *nhash;
+ int err;
+
+ if (!crypto_shash_alg_has_setkey(alg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ if (!alg->clone_tfm)
+ return ERR_PTR(-ENOSYS);
+
+ nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
+ if (IS_ERR(nhash))
+ return nhash;
+
+ nhash->descsize = hash->descsize;
+
+ err = alg->clone_tfm(nhash, hash);
+ if (err) {
+ crypto_free_shash(nhash);
+ return ERR_PTR(err);
+ }
+
+ return nhash;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_shash);
+
+int hash_prepare_alg(struct hash_alg_common *alg)
{
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
struct crypto_alg *base = &alg->base;
- if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
- alg->descsize > HASH_MAX_DESCSIZE ||
- alg->statesize > HASH_MAX_STATESIZE)
+ if (alg->digestsize > HASH_MAX_DIGESTSIZE)
+ return -EINVAL;
+
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
+ return 0;
+}
+
+static int shash_prepare_alg(struct shash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+ int err;
+
+ if (alg->descsize > HASH_MAX_DESCSIZE)
return -EINVAL;
if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
@@ -532,8 +646,11 @@ static int shash_prepare_alg(struct shash_alg *alg)
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_shash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
if (!alg->finup)
@@ -543,7 +660,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
if (!alg->export) {
alg->export = shash_default_export;
alg->import = shash_default_import;
- alg->statesize = alg->descsize;
+ alg->halg.statesize = alg->descsize;
}
if (!alg->setkey)
alg->setkey = shash_no_setkey;
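
A minimal usage sketch for the new crypto_clone_shash() (an illustrative caller, not part of the patch). Unkeyed algorithms are cloned by taking a reference on the existing tfm; keyed ones need a ->clone_tfm callback, otherwise the call fails with -ENOSYS as shown above:

static int demo_clone_shash(struct crypto_shash *tfm)
{
	struct crypto_shash *clone = crypto_clone_shash(tfm);

	if (IS_ERR(clone))
		return PTR_ERR(clone);	/* e.g. -ENOSYS without ->clone_tfm */

	/* the clone carries the original's key state; free it independently */
	crypto_free_shash(clone);
	return 0;
}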
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7bf4871fec80..6caca02d7e55 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -15,11 +15,14 @@
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
@@ -77,6 +80,35 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
+static inline struct skcipher_alg *__crypto_skcipher_alg(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct skcipher_alg, base);
+}
+
+static inline struct crypto_istat_cipher *skcipher_get_stat(
+ struct skcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
+{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -605,34 +637,44 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
- crypto_stats_get(alg);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_skcipher_alg(tfm)->encrypt(req);
- crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
- return ret;
+ ret = alg->encrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
- crypto_stats_get(alg);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
+
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_skcipher_alg(tfm)->decrypt(req);
- crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
- return ret;
+ ret = alg->decrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
@@ -672,8 +714,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
@@ -686,12 +727,11 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "walksize : %u\n", skcipher->walksize);
}
-#ifdef CONFIG_NET
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_skcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
memset(&rblkcipher, 0, sizeof(rblkcipher));
@@ -706,12 +746,28 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(rblkcipher), &rblkcipher);
}
-#else
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+
+static int __maybe_unused crypto_skcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
- return -ENOSYS;
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
+ struct crypto_istat_cipher *istat;
+ struct crypto_stat_cipher rcipher;
+
+ istat = skcipher_get_stat(skcipher);
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
-#endif
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_alg_extsize,
@@ -720,7 +776,12 @@ static const struct crypto_type crypto_skcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_skcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_skcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -775,6 +836,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -790,6 +852,9 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 6521feec7756..202ca1a3105d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -25,14 +25,17 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
-#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/moduleparam.h>
-#include <linux/jiffies.h>
#include <linux/timex.h>
-#include <linux/interrupt.h>
+
+#include "internal.h"
#include "tcrypt.h"
/*
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c91e93ece20b..216878c8bc3d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -860,12 +860,50 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * The fuzz tests use prandom instead of the normal Linux RNG since they don't
+ * need cryptographically secure random numbers. This greatly improves the
+ * performance of these tests, especially if they are run before the Linux RNG
+ * has been initialized or if they are run on a lockdep-enabled kernel.
+ */
+
+static inline void init_rnd_state(struct rnd_state *rng)
+{
+ prandom_seed_state(rng, get_random_u64());
+}
+
+static inline u8 prandom_u8(struct rnd_state *rng)
+{
+ return prandom_u32_state(rng);
+}
+
+static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
+{
+ /*
+ * This is slightly biased for non-power-of-2 values of 'ceil', but this
+ * isn't important here.
+ */
+ return prandom_u32_state(rng) % ceil;
+}
+
+static inline bool prandom_bool(struct rnd_state *rng)
+{
+ return prandom_u32_below(rng, 2);
+}
+
+static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
+ u32 floor, u32 ceil)
+{
+ return floor + prandom_u32_below(rng, ceil - floor + 1);
+}
+
/* Generate a random length in range [0, max_len], but prefer smaller values */
-static unsigned int generate_random_length(unsigned int max_len)
+static unsigned int generate_random_length(struct rnd_state *rng,
+ unsigned int max_len)
{
- unsigned int len = get_random_u32_below(max_len + 1);
+ unsigned int len = prandom_u32_below(rng, max_len + 1);
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
return len % 64;
case 1:
@@ -878,43 +916,44 @@ static unsigned int generate_random_length(unsigned int max_len)
}
/* Flip a random bit in the given nonempty data buffer */
-static void flip_random_bit(u8 *buf, size_t size)
+static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t bitpos;
- bitpos = get_random_u32_below(size * 8);
+ bitpos = prandom_u32_below(rng, size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
-static void flip_random_byte(u8 *buf, size_t size)
+static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
{
- buf[get_random_u32_below(size)] ^= 0xff;
+ buf[prandom_u32_below(rng, size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
-static void mutate_buffer(u8 *buf, size_t size)
+static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t num_flips;
size_t i;
/* Sometimes flip some bits */
- if (get_random_u32_below(4) == 0) {
- num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
+ if (prandom_u32_below(rng, 4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
+ size * 8);
for (i = 0; i < num_flips; i++)
- flip_random_bit(buf, size);
+ flip_random_bit(rng, buf, size);
}
/* Sometimes flip some bytes */
- if (get_random_u32_below(4) == 0) {
- num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
+ if (prandom_u32_below(rng, 4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
for (i = 0; i < num_flips; i++)
- flip_random_byte(buf, size);
+ flip_random_byte(rng, buf, size);
}
}
/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
-static void generate_random_bytes(u8 *buf, size_t count)
+static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
{
u8 b;
u8 increment;
@@ -923,11 +962,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
if (count == 0)
return;
- switch (get_random_u32_below(8)) { /* Choose a generation strategy */
+ switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
b = 0x00;
break;
@@ -935,28 +974,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
b = 0xff;
break;
default:
- b = get_random_u8();
+ b = prandom_u8(rng);
break;
}
memset(buf, b, count);
- mutate_buffer(buf, count);
+ mutate_buffer(rng, buf, count);
break;
case 2:
/* Ascending or descending bytes, plus optional mutations */
- increment = get_random_u8();
- b = get_random_u8();
+ increment = prandom_u8(rng);
+ b = prandom_u8(rng);
for (i = 0; i < count; i++, b += increment)
buf[i] = b;
- mutate_buffer(buf, count);
+ mutate_buffer(rng, buf, count);
break;
default:
/* Fully random bytes */
- for (i = 0; i < count; i++)
- buf[i] = get_random_u8();
+ prandom_bytes_state(rng, buf, count);
}
}
-static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+static char *generate_random_sgl_divisions(struct rnd_state *rng,
+ struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
bool gen_flushes, u32 req_flags)
{
@@ -967,24 +1006,26 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
unsigned int this_len;
const char *flushtype_str;
- if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
+ if (div == &divs[max_divs - 1] || prandom_bool(rng))
this_len = remaining;
else
- this_len = get_random_u32_inclusive(1, remaining);
+ this_len = prandom_u32_inclusive(rng, 1, remaining);
div->proportion_of_total = this_len;
- if (get_random_u32_below(4) == 0)
- div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
- else if (get_random_u32_below(2) == 0)
- div->offset = get_random_u32_below(32);
+ if (prandom_u32_below(rng, 4) == 0)
+ div->offset = prandom_u32_inclusive(rng,
+ PAGE_SIZE - 128,
+ PAGE_SIZE - 1);
+ else if (prandom_bool(rng))
+ div->offset = prandom_u32_below(rng, 32);
else
- div->offset = get_random_u32_below(PAGE_SIZE);
- if (get_random_u32_below(8) == 0)
+ div->offset = prandom_u32_below(rng, PAGE_SIZE);
+ if (prandom_u32_below(rng, 8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
@@ -996,7 +1037,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- get_random_u32_below(2) == 0)
+ prandom_bool(rng))
div->nosimd = true;
switch (div->flush_type) {
@@ -1031,7 +1072,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
}
/* Generate a random testvec_config for fuzz testing */
-static void generate_random_testvec_config(struct testvec_config *cfg,
+static void generate_random_testvec_config(struct rnd_state *rng,
+ struct testvec_config *cfg,
char *name, size_t max_namelen)
{
char *p = name;
@@ -1043,7 +1085,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:");
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
@@ -1058,12 +1100,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (get_random_u32_below(2) == 0) {
+ if (prandom_bool(rng)) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
- switch (get_random_u32_below(4)) {
+ switch (prandom_u32_below(rng, 4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
@@ -1078,36 +1120,37 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- get_random_u32_below(2) == 0) {
+ if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
p += scnprintf(p, end - p, " src_divs=[");
- p = generate_random_sgl_divisions(cfg->src_divs,
+ p = generate_random_sgl_divisions(rng, cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
FINALIZATION_TYPE_DIGEST),
cfg->req_flags);
p += scnprintf(p, end - p, "]");
- if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
+ if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
p += scnprintf(p, end - p, " dst_divs=[");
- p = generate_random_sgl_divisions(cfg->dst_divs,
+ p = generate_random_sgl_divisions(rng, cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
p, end, false,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
- if (get_random_u32_below(2) == 0) {
- cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
+ if (prandom_bool(rng)) {
+ cfg->iv_offset = prandom_u32_inclusive(rng, 1,
+ MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
- if (get_random_u32_below(2) == 0) {
- cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
+ if (prandom_bool(rng)) {
+ cfg->key_offset = prandom_u32_inclusive(rng, 1,
+ MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
@@ -1620,11 +1663,14 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate);
@@ -1642,15 +1688,16 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
-static void generate_random_hash_testvec(struct shash_desc *desc,
+static void generate_random_hash_testvec(struct rnd_state *rng,
+ struct shash_desc *desc,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
char *name, size_t max_namelen)
{
/* Data */
- vec->psize = generate_random_length(maxdatasize);
- generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+ vec->psize = generate_random_length(rng, maxdatasize);
+ generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
/*
* Key: length in range [1, maxkeysize], but usually choose maxkeysize.
@@ -1660,9 +1707,9 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
- if (get_random_u32_below(4) == 0)
- vec->ksize = get_random_u32_inclusive(1, maxkeysize);
- generate_random_bytes((u8 *)vec->key, vec->ksize);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
vec->ksize);
@@ -1696,6 +1743,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
const char *driver = crypto_ahash_driver_name(tfm);
+ struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_shash *generic_tfm = NULL;
struct shash_desc *generic_desc = NULL;
@@ -1709,6 +1757,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (noextratests)
return 0;
+ init_rnd_state(&rng);
+
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -1777,10 +1827,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_hash_testvec(generic_desc, &vec,
+ generate_random_hash_testvec(&rng, generic_desc, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ generate_random_testvec_config(&rng, cfg, cfgname,
+ sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
req, desc, tsgl, hashstate);
@@ -2182,11 +2233,14 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_aead_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2202,6 +2256,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
struct aead_extra_tests_ctx {
+ struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
const struct alg_test_desc *test_desc;
@@ -2220,24 +2275,26 @@ struct aead_extra_tests_ctx {
* here means the full ciphertext including the authentication tag. The
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
*/
-static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+static void mutate_aead_message(struct rnd_state *rng,
+ struct aead_testvec *vec, bool aad_iv,
unsigned int ivsize)
{
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
- if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
+ if (prandom_bool(rng) && vec->alen > aad_tail_size) {
/* Mutate the AAD */
- flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
- if (get_random_u32_below(2) == 0)
+ flip_random_bit(rng, (u8 *)vec->assoc,
+ vec->alen - aad_tail_size);
+ if (prandom_bool(rng))
return;
}
- if (get_random_u32_below(2) == 0) {
+ if (prandom_bool(rng)) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
- flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
+ flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
} else {
/* Mutate any part of the ciphertext */
- flip_random_bit((u8 *)vec->ctext, vec->clen);
+ flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
}
}
@@ -2248,7 +2305,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
*/
#define MIN_COLLISION_FREE_AUTHSIZE 8
-static void generate_aead_message(struct aead_request *req,
+static void generate_aead_message(struct rnd_state *rng,
+ struct aead_request *req,
const struct aead_test_suite *suite,
struct aead_testvec *vec,
bool prefer_inauthentic)
@@ -2257,17 +2315,18 @@ static void generate_aead_message(struct aead_request *req,
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
- (prefer_inauthentic || get_random_u32_below(4) == 0);
+ (prefer_inauthentic ||
+ prandom_u32_below(rng, 4) == 0);
/* Generate the AAD. */
- generate_random_bytes((u8 *)vec->assoc, vec->alen);
+ generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
if (suite->aad_iv && vec->alen >= ivsize)
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
- if (inauthentic && get_random_u32_below(2) == 0) {
+ if (inauthentic && prandom_bool(rng)) {
/* Generate a random ciphertext. */
- generate_random_bytes((u8 *)vec->ctext, vec->clen);
+ generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
} else {
int i = 0;
struct scatterlist src[2], dst;
@@ -2279,7 +2338,7 @@ static void generate_aead_message(struct aead_request *req,
if (vec->alen)
sg_set_buf(&src[i++], vec->assoc, vec->alen);
if (vec->plen) {
- generate_random_bytes((u8 *)vec->ptext, vec->plen);
+ generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
sg_set_buf(&src[i++], vec->ptext, vec->plen);
}
sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
@@ -2299,7 +2358,7 @@ static void generate_aead_message(struct aead_request *req,
* Mutate the authentic (ciphertext, AAD) pair to get an
* inauthentic one.
*/
- mutate_aead_message(vec, suite->aad_iv, ivsize);
+ mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
}
vec->novrfy = 1;
if (suite->einval_allowed)
@@ -2313,7 +2372,8 @@ static void generate_aead_message(struct aead_request *req,
* If 'prefer_inauthentic' is true, then this function will generate inauthentic
* test vectors (i.e. vectors with 'vec->novrfy=1') more often.
*/
-static void generate_random_aead_testvec(struct aead_request *req,
+static void generate_random_aead_testvec(struct rnd_state *rng,
+ struct aead_request *req,
struct aead_testvec *vec,
const struct aead_test_suite *suite,
unsigned int maxkeysize,
@@ -2329,18 +2389,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (get_random_u32_below(4) == 0)
- vec->klen = get_random_u32_below(maxkeysize + 1);
- generate_random_bytes((u8 *)vec->key, vec->klen);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
/* IV */
- generate_random_bytes((u8 *)vec->iv, ivsize);
+ generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
- if (get_random_u32_below(4) == 0)
- authsize = get_random_u32_below(maxauthsize + 1);
+ if (prandom_u32_below(rng, 4) == 0)
+ authsize = prandom_u32_below(rng, maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
@@ -2349,11 +2409,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
/* AAD, plaintext, and ciphertext lengths */
- total_len = generate_random_length(maxdatasize);
- if (get_random_u32_below(4) == 0)
+ total_len = generate_random_length(rng, maxdatasize);
+ if (prandom_u32_below(rng, 4) == 0)
vec->alen = 0;
else
- vec->alen = generate_random_length(total_len);
+ vec->alen = generate_random_length(rng, total_len);
vec->plen = total_len - vec->alen;
vec->clen = vec->plen + authsize;
@@ -2364,7 +2424,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
vec->novrfy = 0;
vec->crypt_error = 0;
if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
- generate_aead_message(req, suite, vec, prefer_inauthentic);
+ generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
snprintf(name, max_namelen,
"\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
@@ -2376,7 +2436,7 @@ static void try_to_generate_inauthentic_testvec(
int i;
for (i = 0; i < 10; i++) {
- generate_random_aead_testvec(ctx->req, &ctx->vec,
+ generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
@@ -2407,7 +2467,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
*/
try_to_generate_inauthentic_testvec(ctx);
if (ctx->vec.novrfy) {
- generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+ ctx->cfgname,
sizeof(ctx->cfgname));
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
@@ -2497,12 +2558,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
* the other implementation against them.
*/
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_aead_testvec(generic_req, &ctx->vec,
+ generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
sizeof(ctx->vec_name), false);
- generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+ ctx->cfgname,
sizeof(ctx->cfgname));
if (!ctx->vec.novrfy) {
err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
@@ -2541,6 +2603,7 @@ static int test_aead_extra(const struct alg_test_desc *test_desc,
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ init_rnd_state(&ctx->rng);
ctx->req = req;
ctx->tfm = crypto_aead_reqtfm(req);
ctx->test_desc = test_desc;
@@ -2930,11 +2993,14 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
+ struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
+ init_rnd_state(&rng);
+
for (i = 0; i < fuzz_iterations; i++) {
- generate_random_testvec_config(&cfg, cfgname,
+ generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
@@ -2952,7 +3018,8 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
-static void generate_random_cipher_testvec(struct skcipher_request *req,
+static void generate_random_cipher_testvec(struct rnd_state *rng,
+ struct skcipher_request *req,
struct cipher_testvec *vec,
unsigned int maxdatasize,
char *name, size_t max_namelen)
@@ -2966,17 +3033,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (get_random_u32_below(4) == 0)
- vec->klen = get_random_u32_below(maxkeysize + 1);
- generate_random_bytes((u8 *)vec->key, vec->klen);
+ if (prandom_u32_below(rng, 4) == 0)
+ vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+ generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
/* IV */
- generate_random_bytes((u8 *)vec->iv, ivsize);
+ generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Plaintext */
- vec->len = generate_random_length(maxdatasize);
- generate_random_bytes((u8 *)vec->ptext, vec->len);
+ vec->len = generate_random_length(rng, maxdatasize);
+ generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
/* If the key couldn't be set, no need to continue to encrypt. */
if (vec->setkey_error)
@@ -3018,6 +3085,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
const char *driver = crypto_skcipher_driver_name(tfm);
+ struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_skcipher *generic_tfm = NULL;
struct skcipher_request *generic_req = NULL;
@@ -3035,6 +3103,8 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
if (strncmp(algname, "kw(", 3) == 0)
return 0;
+ init_rnd_state(&rng);
+
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -3119,9 +3189,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+ generate_random_cipher_testvec(&rng, generic_req, &vec,
+ maxdatasize,
vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ generate_random_testvec_config(&rng, cfg, cfgname,
+ sizeof(cfgname));
err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
cfg, req, tsgls);
@@ -4573,6 +4645,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(aes_cmac128_tv_template)
}
}, {
+ .alg = "cmac(camellia)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(camellia_cmac128_tv_template)
+ }
+ }, {
.alg = "cmac(des3_ede)",
.test = alg_test_hash,
.suite = {
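
Putting the converted helpers together, a sketch using only functions defined in this file (the buffer size is arbitrary): each test seeds a local rnd_state once and draws everything from it, keeping fuzzing off the global RNG.

static void demo_fuzz_buffer(void)
{
	struct rnd_state rng;
	u8 buf[128];
	unsigned int len;

	init_rnd_state(&rng);				/* seed once per test */
	len = generate_random_length(&rng, sizeof(buf));
	generate_random_bytes(&rng, buf, len);		/* handles len == 0 */
	if (len && prandom_bool(&rng))
		mutate_buffer(&rng, buf, len);		/* needs a nonempty buffer */
}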
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index f10bfb9d9973..5ca7a412508f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25665,6 +25665,53 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = {
/*
* CAMELLIA test vectors.
*/
+static const struct hash_testvec camellia_cmac128_tv_template[] = {
+ { /* From draft-kato-ipsec-camellia-cmac96and128-01 */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = zeroed_string,
+ .digest = "\xba\x92\x57\x82\xaa\xa1\xf5\xd9"
+ "\xa0\x0f\x89\x64\x80\x94\xfc\x71",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+ .digest = "\x6d\x96\x28\x54\xa3\xb9\xfd\xa5"
+ "\x6d\x7d\x45\xa9\x5e\xe1\x79\x93",
+ .psize = 16,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11",
+ .digest = "\x5c\x18\xd1\x19\xcc\xd6\x76\x61"
+ "\x44\xac\x18\x66\x13\x1d\x9f\x22",
+ .psize = 40,
+ .ksize = 16,
+ }, {
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .digest = "\xc2\x69\x9a\x6e\xba\x55\xce\x9d"
+ "\x93\x9a\x8a\x4e\x19\x46\x6e\xe9",
+ .psize = 64,
+ .ksize = 16,
+ }
+};
static const struct cipher_testvec camellia_tv_template[] = {
{
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"