From 6c4fed5fee42f5785e881ef2c28359724b18b80e Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Mon, 15 Sep 2025 19:00:25 +0530 Subject: crypto: drbg - Export CTR DRBG DF functions Export drbg_ctr_df() derivative function to new module df_sp80090. Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- include/crypto/df_sp80090a.h | 27 +++++++++++++++++++++ include/crypto/drbg.h | 25 +------------------ include/crypto/internal/drbg.h | 54 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 24 deletions(-) create mode 100644 include/crypto/df_sp80090a.h create mode 100644 include/crypto/internal/drbg.h (limited to 'include') diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h new file mode 100644 index 000000000000..182865538662 --- /dev/null +++ b/include/crypto/df_sp80090a.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright Stephan Mueller , 2014 + */ + +#ifndef _CRYPTO_DF80090A_H +#define _CRYPTO_DF80090A_H + +#include + +static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen) +{ + return statelen + /* df_data */ + blocklen + /* pad */ + blocklen + /* iv */ + statelen + blocklen; /* temp */ +} + +int crypto_drbg_ctr_df(struct crypto_cipher *tfm, + unsigned char *df_data, + size_t bytes_to_return, + struct list_head *seedlist, + u8 blocklen_bytes, + u8 statelen); + +#endif /* _CRYPTO_DF80090A_H */ diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index af5ad51d3eef..2d42518cbdce 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -54,30 +55,6 @@ #include #include -/* - * Concatenation Helper and string operation helper - * - * SP800-90A requires the concatenation of different data. To avoid copying - * buffers around or allocate additional memory, the following data structure - * is used to point to the original memory with its size. In addition, it - * is used to build a linked list. The linked list defines the concatenation - * of individual buffers. The order of memory block referenced in that - * linked list determines the order of concatenation. - */ -struct drbg_string { - const unsigned char *buf; - size_t len; - struct list_head list; -}; - -static inline void drbg_string_fill(struct drbg_string *string, - const unsigned char *buf, size_t len) -{ - string->buf = buf; - string->len = len; - INIT_LIST_HEAD(&string->list); -} - struct drbg_state; typedef uint32_t drbg_flag_t; diff --git a/include/crypto/internal/drbg.h b/include/crypto/internal/drbg.h new file mode 100644 index 000000000000..371e52dcee6c --- /dev/null +++ b/include/crypto/internal/drbg.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * NIST SP800-90A DRBG derivation function + * + * Copyright (C) 2014, Stephan Mueller + */ + +#ifndef _INTERNAL_DRBG_H +#define _INTERNAL_DRBG_H + +/* + * Convert an integer into a byte representation of this integer. + * The byte representation is big-endian + * + * @val value to be converted + * @buf buffer holding the converted integer -- caller must ensure that + * buffer size is at least 32 bit + */ +static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf) +{ + struct s { + __be32 conv; + }; + struct s *conversion = (struct s *)buf; + + conversion->conv = cpu_to_be32(val); +} + +/* + * Concatenation Helper and string operation helper + * + * SP800-90A requires the concatenation of different data. 
To avoid copying + * buffers around or allocate additional memory, the following data structure + * is used to point to the original memory with its size. In addition, it + * is used to build a linked list. The linked list defines the concatenation + * of individual buffers. The order of memory block referenced in that + * linked list determines the order of concatenation. + */ +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +static inline void drbg_string_fill(struct drbg_string *string, + const unsigned char *buf, size_t len) +{ + string->buf = buf; + string->len = len; + INIT_LIST_HEAD(&string->list); +} + +#endif //_INTERNAL_DRBG_H -- cgit From ba0570bdf1d9956a63db2ddc50fa6a78d8c93f30 Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Mon, 15 Sep 2025 19:00:26 +0530 Subject: crypto: drbg - Replace AES cipher calls with library calls Replace aes used in drbg with library calls. Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- include/crypto/df_sp80090a.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h index 182865538662..6b25305fe611 100644 --- a/include/crypto/df_sp80090a.h +++ b/include/crypto/df_sp80090a.h @@ -8,6 +8,7 @@ #define _CRYPTO_DF80090A_H #include +#include static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen) { @@ -17,7 +18,7 @@ static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen) statelen + blocklen; /* temp */ } -int crypto_drbg_ctr_df(struct crypto_cipher *tfm, +int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes, unsigned char *df_data, size_t bytes_to_return, struct list_head *seedlist, -- cgit From a703a4c2a3280835003d4d0eb8845bac0f1a6ef1 Mon Sep 17 00:00:00 2001 From: Meenakshi Aggarwal Date: Mon, 6 Oct 2025 09:17:52 +0200 Subject: KEYS: trusted: caam based protected key - CAAM supports two types of protected keys: -- Plain key encrypted with ECB -- Plain key encrypted with CCM Due to robustness, default encryption used for protected key is CCM. - Generate protected key blob and add it to trusted key payload. This is done as part of sealing operation, which is triggered when below two operations are requested: -- new key generation -- load key, Signed-off-by: Pankaj Gupta Signed-off-by: Meenakshi Aggarwal Signed-off-by: Herbert Xu --- include/soc/fsl/caam-blob.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'include') diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h index 937cac52f36d..922f7ec3e231 100644 --- a/include/soc/fsl/caam-blob.h +++ b/include/soc/fsl/caam-blob.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 Pengutronix, Ahmad Fatoum + * Copyright 2024-2025 NXP */ #ifndef __CAAM_BLOB_GEN @@ -12,11 +13,34 @@ #define CAAM_BLOB_KEYMOD_LENGTH 16 #define CAAM_BLOB_OVERHEAD (32 + 16) #define CAAM_BLOB_MAX_LEN 4096 +#define CAAM_ENC_ALGO_CCM 0x1 +#define CAAM_ENC_ALGO_ECB 0x2 +#define CAAM_NONCE_SIZE 6 +#define CAAM_ICV_SIZE 6 +#define CAAM_CCM_OVERHEAD (CAAM_NONCE_SIZE + CAAM_ICV_SIZE) struct caam_blob_priv; +/** + * struct caam_pkey_info - information for CAAM protected key + * @is_pkey: flag to identify, if the key is protected. + * @key_enc_algo: identifies the algorithm, ccm or ecb + * @plain_key_sz: size of plain key. 
+ * @key_buf: contains key data + */ +struct caam_pkey_info { + u8 is_pkey; + u8 key_enc_algo; + u16 plain_key_sz; + u8 key_buf[]; +} __packed; + +/* sizeof struct caam_pkey_info */ +#define CAAM_PKEY_HEADER 4 + /** * struct caam_blob_info - information for CAAM blobbing + * @pkey_info: pointer to keep protected key information * @input: pointer to input buffer (must be DMAable) * @input_len: length of @input buffer in bytes. * @output: pointer to output buffer (must be DMAable) @@ -26,6 +50,8 @@ struct caam_blob_priv; * May not exceed %CAAM_BLOB_KEYMOD_LENGTH */ struct caam_blob_info { + struct caam_pkey_info pkey_info; + void *input; size_t input_len; -- cgit From aa653654ee67f9cbbebb7d4c18f360ad4fef3180 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 11 Oct 2025 09:48:55 +0800 Subject: rhashtable: use likely for rhashtable lookup Sometimes, the result of the rhashtable_lookup() is expected to be found. Therefore, we can use likely() for such cases. Following new functions are introduced, which will use likely or unlikely during the lookup: rhashtable_lookup_likely rhltable_lookup_likely A micro-benchmark is made for these new functions: lookup a existed entry repeatedly for 100000000 times, and rhashtable_lookup_likely() gets ~30% speedup. Signed-off-by: Menglong Dong Signed-off-by: Herbert Xu --- include/linux/rhashtable.h | 70 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 05a221ce79a6..08e664b21f5a 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -355,12 +355,25 @@ static inline void rht_unlock(struct bucket_table *tbl, local_irq_restore(flags); } -static inline struct rhash_head *__rht_ptr( - struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) +enum rht_lookup_freq { + RHT_LOOKUP_NORMAL, + RHT_LOOKUP_LIKELY, +}; + +static __always_inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt, + const enum rht_lookup_freq freq) { - return (struct rhash_head *) - ((unsigned long)p & ~BIT(0) ?: - (unsigned long)RHT_NULLS_MARKER(bkt)); + unsigned long p_val = (unsigned long)p & ~BIT(0); + + BUILD_BUG_ON(!__builtin_constant_p(freq)); + + if (freq == RHT_LOOKUP_LIKELY) + return (struct rhash_head *) + (likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt)); + else + return (struct rhash_head *) + (p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } /* @@ -370,10 +383,17 @@ static inline struct rhash_head *__rht_ptr( * rht_ptr_exclusive() dereferences in a context where exclusive * access is guaranteed, such as when destroying the table. 
*/ +static __always_inline struct rhash_head *__rht_ptr_rcu( + struct rhash_lock_head __rcu *const *bkt, + const enum rht_lookup_freq freq) +{ + return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq); +} + static inline struct rhash_head *rht_ptr_rcu( struct rhash_lock_head __rcu *const *bkt) { - return __rht_ptr(rcu_dereference_all(*bkt), bkt); + return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL); } static inline struct rhash_head *rht_ptr( @@ -381,13 +401,15 @@ static inline struct rhash_head *rht_ptr( struct bucket_table *tbl, unsigned int hash) { - return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt, + RHT_LOOKUP_NORMAL); } static inline struct rhash_head *rht_ptr_exclusive( struct rhash_lock_head __rcu *const *bkt) { - return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt, + RHT_LOOKUP_NORMAL); } static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, @@ -588,7 +610,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, /* Internal function, do not use. */ static __always_inline struct rhash_head *__rhashtable_lookup( struct rhashtable *ht, const void *key, - const struct rhashtable_params params) + const struct rhashtable_params params, + const enum rht_lookup_freq freq) { struct rhashtable_compare_arg arg = { .ht = ht, @@ -599,12 +622,13 @@ static __always_inline struct rhash_head *__rhashtable_lookup( struct rhash_head *he; unsigned int hash; + BUILD_BUG_ON(!__builtin_constant_p(freq)); tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); bkt = rht_bucket(tbl, hash); do { - rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { + rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) { if (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, he)) : rhashtable_compare(&arg, rht_obj(ht, he))) @@ -643,11 +667,22 @@ static __always_inline void *rhashtable_lookup( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { - struct rhash_head *he = __rhashtable_lookup(ht, key, params); + struct rhash_head *he = __rhashtable_lookup(ht, key, params, + RHT_LOOKUP_NORMAL); return he ? rht_obj(ht, he) : NULL; } +static __always_inline void *rhashtable_lookup_likely( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(ht, key, params, + RHT_LOOKUP_LIKELY); + + return likely(he) ? rht_obj(ht, he) : NULL; +} + /** * rhashtable_lookup_fast - search hash table, without RCU read lock * @ht: hash table @@ -693,11 +728,22 @@ static __always_inline struct rhlist_head *rhltable_lookup( struct rhltable *hlt, const void *key, const struct rhashtable_params params) { - struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params); + struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params, + RHT_LOOKUP_NORMAL); return he ? container_of(he, struct rhlist_head, rhead) : NULL; } +static __always_inline struct rhlist_head *rhltable_lookup_likely( + struct rhltable *hlt, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params, + RHT_LOOKUP_LIKELY); + + return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL; +} + /* Internal function, please use rhashtable_insert_fast() instead. 
This * function returns the existing element already in hashes if there is a clash, * otherwise it returns an error via ERR_PTR(). -- cgit From 85e1a7ec61d9829af5897da421eb135c6cc73e07 Mon Sep 17 00:00:00 2001 From: T Pratham Date: Wed, 22 Oct 2025 22:48:42 +0530 Subject: crypto: aead - Add support for on-stack AEAD req allocation This patch introduces infrastructure for allocating req objects on the stack for AEADs. The additions mirror the existing sync skcipher APIs. This can be used in cases where simple sync AEAD operations are being done. So allocating the request on stack avoides possible out-of-memory errors. The struct crypto_sync_aead is a wrapper around crypto_aead and should be used in its place when sync only requests will be done on the stack. Correspondingly, the request should be allocated with SYNC_AEAD_REQUEST_ON_STACK(). Similar to sync_skcipher APIs, the new sync_aead APIs are wrappers around the regular aead APIs to facilitate sync only operations. The following crypto APIs are added: - struct crypto_sync_aead - crypto_alloc_sync_aead() - crypto_free_sync_aead() - crypto_aync_aead_tfm() - crypto_sync_aead_setkey() - crypto_sync_aead_setauthsize() - crypto_sync_aead_authsize() - crypto_sync_aead_maxauthsize() - crypto_sync_aead_ivsize() - crypto_sync_aead_blocksize() - crypto_sync_aead_get_flags() - crypto_sync_aead_set_flags() - crypto_sync_aead_clear_flags() - crypto_sync_aead_reqtfm() - aead_request_set_sync_tfm() - SYNC_AEAD_REQUEST_ON_STACK() Signed-off-by: T Pratham Signed-off-by: Herbert Xu --- include/crypto/aead.h | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) (limited to 'include') diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 0e8a41638678..8e66a1fa9c78 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -159,6 +159,21 @@ struct crypto_aead { struct crypto_tfm base; }; +struct crypto_sync_aead { + struct crypto_aead base; +}; + +#define MAX_SYNC_AEAD_REQSIZE 384 + +#define SYNC_AEAD_REQUEST_ON_STACK(name, _tfm) \ + char __##name##_desc[sizeof(struct aead_request) + \ + MAX_SYNC_AEAD_REQSIZE \ + ] CRYPTO_MINALIGN_ATTR; \ + struct aead_request *name = \ + (((struct aead_request *)__##name##_desc)->base.tfm = \ + crypto_sync_aead_tfm((_tfm)), \ + (void *)__##name##_desc) + static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_aead, base); @@ -180,11 +195,18 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) */ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); +struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask); + static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) { return &tfm->base; } +static inline struct crypto_tfm *crypto_sync_aead_tfm(struct crypto_sync_aead *tfm) +{ + return crypto_aead_tfm(&tfm->base); +} + /** * crypto_free_aead() - zeroize and free aead handle * @tfm: cipher handle to be freed @@ -196,6 +218,11 @@ static inline void crypto_free_aead(struct crypto_aead *tfm) crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); } +static inline void crypto_free_sync_aead(struct crypto_sync_aead *tfm) +{ + crypto_free_aead(&tfm->base); +} + /** * crypto_has_aead() - Search for the availability of an aead. 
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -238,6 +265,11 @@ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) return crypto_aead_alg_ivsize(crypto_aead_alg(tfm)); } +static inline unsigned int crypto_sync_aead_ivsize(struct crypto_sync_aead *tfm) +{ + return crypto_aead_ivsize(&tfm->base); +} + /** * crypto_aead_authsize() - obtain maximum authentication data size * @tfm: cipher handle @@ -255,6 +287,11 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) return tfm->authsize; } +static inline unsigned int crypto_sync_aead_authsize(struct crypto_sync_aead *tfm) +{ + return crypto_aead_authsize(&tfm->base); +} + static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) { return alg->maxauthsize; @@ -265,6 +302,11 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); } +static inline unsigned int crypto_sync_aead_maxauthsize(struct crypto_sync_aead *tfm) +{ + return crypto_aead_maxauthsize(&tfm->base); +} + /** * crypto_aead_blocksize() - obtain block size of cipher * @tfm: cipher handle @@ -280,6 +322,11 @@ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); } +static inline unsigned int crypto_sync_aead_blocksize(struct crypto_sync_aead *tfm) +{ + return crypto_aead_blocksize(&tfm->base); +} + static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) { return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); @@ -300,6 +347,21 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); } +static inline u32 crypto_sync_aead_get_flags(struct crypto_sync_aead *tfm) +{ + return crypto_aead_get_flags(&tfm->base); +} + +static inline void crypto_sync_aead_set_flags(struct crypto_sync_aead *tfm, u32 flags) +{ + crypto_aead_set_flags(&tfm->base, flags); +} + +static inline void crypto_sync_aead_clear_flags(struct crypto_sync_aead *tfm, u32 flags) +{ + crypto_aead_clear_flags(&tfm->base, flags); +} + /** * crypto_aead_setkey() - set key for cipher * @tfm: cipher handle @@ -319,6 +381,12 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); +static inline int crypto_sync_aead_setkey(struct crypto_sync_aead *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_aead_setkey(&tfm->base, key, keylen); +} + /** * crypto_aead_setauthsize() - set authentication data size * @tfm: cipher handle @@ -331,11 +399,24 @@ int crypto_aead_setkey(struct crypto_aead *tfm, */ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); +static inline int crypto_sync_aead_setauthsize(struct crypto_sync_aead *tfm, + unsigned int authsize) +{ + return crypto_aead_setauthsize(&tfm->base, authsize); +} + static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) { return __crypto_aead_cast(req->base.tfm); } +static inline struct crypto_sync_aead *crypto_sync_aead_reqtfm(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + + return container_of(tfm, struct crypto_sync_aead, base); +} + /** * crypto_aead_encrypt() - encrypt plaintext * @req: reference to the aead_request handle that holds all information @@ -417,6 +498,12 @@ static inline void aead_request_set_tfm(struct aead_request *req, 
req->base.tfm = crypto_aead_tfm(tfm); } +static inline void aead_request_set_sync_tfm(struct aead_request *req, + struct crypto_sync_aead *tfm) +{ + aead_request_set_tfm(req, &tfm->base); +} + /** * aead_request_alloc() - allocate request data structure * @tfm: cipher handle to be registered with the request -- cgit From 12ad5b2346f905a3962b4aee701191b7a8d1905a Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Thu, 23 Oct 2025 19:48:11 +0200 Subject: keys: Annotate struct asymmetric_key_id with __counted_by Add the __counted_by() compiler attribute to the flexible array member 'data' to improve access bounds-checking via CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE. Signed-off-by: Thorsten Blum Reviewed-by: Lukas Wunner Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- include/keys/asymmetric-type.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index 69a13e1e5b2e..1b91c8f98688 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -49,7 +49,7 @@ enum asymmetric_payload_bits { */ struct asymmetric_key_id { unsigned short len; - unsigned char data[]; + unsigned char data[] __counted_by(len); }; struct asymmetric_key_ids { -- cgit From c7dcb041ce7d32c0becd43e8f99f993365e6bd20 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 13 Nov 2025 18:57:08 -0800 Subject: crypto: ansi_cprng - Remove unused ansi_cprng algorithm Remove ansi_cprng, since it's obsolete and unused, as confirmed at https://lore.kernel.org/r/aQxpnckYMgAAOLpZ@gondor.apana.org.au/ This was originally added in 2008, apparently as a FIPS approved random number generator. Whether this has ever belonged upstream is questionable. Either way, ansi_cprng is no longer usable for this purpose, since it's been superseded by the more modern algorithms in crypto/drbg.c, and FIPS itself no longer allows it. (NIST SP 800-131A Rev 1 (2015) says that RNGs based on ANSI X9.31 will be disallowed after 2015. NIST SP 800-131A Rev 2 (2019) confirms they are now disallowed.) Therefore, there is no reason to keep it around. Suggested-by: Herbert Xu Cc: Haotian Zhang Cc: Neil Horman Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/rng.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/crypto/rng.h b/include/crypto/rng.h index f8224cc390f8..d451b54b322a 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -169,12 +169,11 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, * * The reset function completely re-initializes the random number generator * referenced by the cipher handle by clearing the current state. The new state - * is initialized with the caller provided seed or automatically, depending - * on the random number generator type (the ANSI X9.31 RNG requires - * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). - * The seed is provided as a parameter to this function call. The provided seed - * should have the length of the seed size defined for the random number - * generator as defined by crypto_rng_seedsize. + * is initialized with the caller provided seed or automatically, depending on + * the random number generator type. (The SP800-90A DRBGs perform an automatic + * seeding.) The seed is provided as a parameter to this function call. 
The + * provided seed should have the length of the seed size defined for the random + * number generator as defined by crypto_rng_seedsize. * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ -- cgit From 4dffc9bbffb9ccfcda730d899c97c553599e7ca8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 15 Nov 2025 15:08:16 -0800 Subject: crypto: scatterwalk - Fix memcpy_sglist() to always succeed The original implementation of memcpy_sglist() was broken because it didn't handle scatterlists that describe exactly the same memory, which is a case that many callers rely on. The current implementation is broken too because it calls the skcipher_walk functions which can fail. It ignores any errors from those functions. Fix it by replacing it with a new implementation written from scratch. It always succeeds. It's also a bit faster, since it avoids the overhead of skcipher_walk. skcipher_walk includes a lot of functionality (such as alignmask handling) that's irrelevant here. Reported-by: Colin Ian King Closes: https://lore.kernel.org/r/20251114122620.111623-1-coking@nvidia.com Fixes: 131bdceca1f0 ("crypto: scatterwalk - Add memcpy_sglist") Fixes: 0f8d42bf128d ("crypto: scatterwalk - Move skcipher walk and use it for memcpy_sglist") Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 52 ++++++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 83d14376ff2b..f485454e3955 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -227,6 +227,34 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk, scatterwalk_advance(walk, nbytes); } +/* + * Flush the dcache of any pages that overlap the region + * [offset, offset + nbytes) relative to base_page. + * + * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure + * that all relevant code (including the call to sg_page() in the caller, if + * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE. + */ +static inline void __scatterwalk_flush_dcache_pages(struct page *base_page, + unsigned int offset, + unsigned int nbytes) +{ + unsigned int num_pages; + + base_page += offset / PAGE_SIZE; + offset %= PAGE_SIZE; + + /* + * This is an overflow-safe version of + * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE). + */ + num_pages = nbytes / PAGE_SIZE; + num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE); + + for (unsigned int i = 0; i < num_pages; i++) + flush_dcache_page(base_page + i); +} + /** * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist * @walk: the scatter_walk @@ -240,27 +268,9 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk, unsigned int nbytes) { scatterwalk_unmap(walk); - /* - * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just - * relying on flush_dcache_page() being a no-op when not implemented, - * since otherwise the BUG_ON in sg_page() does not get optimized out. - * This also avoids having to consider whether the loop would get - * reliably optimized out or not. 
- */ - if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) { - struct page *base_page; - unsigned int offset; - int start, end, i; - - base_page = sg_page(walk->sg); - offset = walk->offset; - start = offset >> PAGE_SHIFT; - end = start + (nbytes >> PAGE_SHIFT); - end += (offset_in_page(offset) + offset_in_page(nbytes) + - PAGE_SIZE - 1) >> PAGE_SHIFT; - for (i = start; i < end; i++) - flush_dcache_page(base_page + i); - } + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) + __scatterwalk_flush_dcache_pages(sg_page(walk->sg), + walk->offset, nbytes); scatterwalk_advance(walk, nbytes); } -- cgit From 20d868a77f11ba050fe96e7b8efb8ec3b6f2737f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 15 Nov 2025 15:08:17 -0800 Subject: Revert "crypto: scatterwalk - Move skcipher walk and use it for memcpy_sglist" This reverts commit 0f8d42bf128d349ad490e87d5574d211245e40f1, with the memcpy_sglist() part dropped. Now that memcpy_sglist() no longer uses the skcipher_walk code, the skcipher_walk code can be moved back to where it belongs. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 12 +++++++ include/crypto/internal/skcipher.h | 48 +++++++++++++++++++++++++++- include/crypto/scatterwalk.h | 65 ++------------------------------------ 3 files changed, 61 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index fc4574940636..05deea9dac5e 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -107,6 +107,18 @@ struct crypto_queue { unsigned int max_qlen; }; +struct scatter_walk { + /* Must be the first member, see struct skcipher_walk. */ + union { + void *const addr; + + /* Private API field, do not touch. */ + union crypto_no_such_thing *__addr; + }; + struct scatterlist *sg; + unsigned int offset; +}; + struct crypto_attr_alg { char name[CRYPTO_MAX_ALG_NAME]; }; diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index d5aa535263f6..0cad8e7364c8 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -10,7 +10,6 @@ #include #include -#include #include #include @@ -55,6 +54,47 @@ struct crypto_lskcipher_spawn { struct crypto_spawn base; }; +struct skcipher_walk { + union { + /* Virtual address of the source. */ + struct { + struct { + const void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; + + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. 
*/ + struct scatter_walk out; + }; + + unsigned int nbytes; + unsigned int total; + + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + + unsigned int ivsize; + + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + static inline struct crypto_instance *skcipher_crypto_instance( struct skcipher_instance *inst) { @@ -171,6 +211,7 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); +int skcipher_walk_done(struct skcipher_walk *walk, int res); int skcipher_walk_virt(struct skcipher_walk *__restrict walk, struct skcipher_request *__restrict req, bool atomic); @@ -181,6 +222,11 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, struct aead_request *__restrict req, bool atomic); +static inline void skcipher_walk_abort(struct skcipher_walk *walk) +{ + skcipher_walk_done(walk, -ECANCELED); +} + static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) { return crypto_tfm_ctx(&tfm->base); diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index f485454e3955..624fab589c2c 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -11,64 +11,11 @@ #ifndef _CRYPTO_SCATTERWALK_H #define _CRYPTO_SCATTERWALK_H -#include +#include + #include #include #include -#include - -struct scatter_walk { - /* Must be the first member, see struct skcipher_walk. */ - union { - void *const addr; - - /* Private API field, do not touch. */ - union crypto_no_such_thing *__addr; - }; - struct scatterlist *sg; - unsigned int offset; -}; - -struct skcipher_walk { - union { - /* Virtual address of the source. */ - struct { - struct { - const void *const addr; - } virt; - } src; - - /* Private field for the API, do not use. */ - struct scatter_walk in; - }; - - union { - /* Virtual address of the destination. */ - struct { - struct { - void *const addr; - } virt; - } dst; - - /* Private field for the API, do not use. */ - struct scatter_walk out; - }; - - unsigned int nbytes; - unsigned int total; - - u8 *page; - u8 *buffer; - u8 *oiv; - void *iv; - - unsigned int ivsize; - - int flags; - unsigned int blocksize; - unsigned int stride; - unsigned int alignmask; -}; static inline void scatterwalk_crypto_chain(struct scatterlist *head, struct scatterlist *sg, int num) @@ -306,12 +253,4 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len); -int skcipher_walk_first(struct skcipher_walk *walk, bool atomic); -int skcipher_walk_done(struct skcipher_walk *walk, int res); - -static inline void skcipher_walk_abort(struct skcipher_walk *walk) -{ - skcipher_walk_done(walk, -ECANCELED); -} - #endif /* _CRYPTO_SCATTERWALK_H */ -- cgit