author	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-03 11:28:38 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-03 11:28:38 -0800
commit	a619fe35ab41fded440d3762d4fbad84ff86a4d4 (patch)
tree	2cc42a8bb9ec80f7850059c5cab383233c804957 /include
parent	c8321831480d80af01ce001bd6626fc130fd13b1 (diff)
parent	48bc9da3c97c15f1ea24934bcb3b736acd30163d (diff)
Merge tag 'v6.19-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Rewrite memcpy_sglist from scratch
   - Add on-stack AEAD request allocation
   - Fix partial block processing in ahash

  Algorithms:
   - Remove ansi_cprng
   - Remove tcrypt tests for poly1305
   - Fix EINPROGRESS processing in authenc
   - Fix double-free in zstd

  Drivers:
   - Use drbg ctr helper when reseeding xilinx-trng
   - Add support for PCI device 0x115A to ccp
   - Add support of paes in caam
   - Add support for aes-xts in dthev2

  Others:
   - Use likely in rhashtable lookup
   - Fix lockdep false-positive in padata by removing a helper"

* tag 'v6.19-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
  crypto: zstd - fix double-free in per-CPU stream cleanup
  crypto: ahash - Zero positive err value in ahash_update_finish
  crypto: ahash - Fix crypto_ahash_import with partial block data
  crypto: lib/mpi - use min() instead of min_t()
  crypto: ccp - use min() instead of min_t()
  hwrng: core - use min3() instead of nested min_t()
  crypto: aesni - ctr_crypt() use min() instead of min_t()
  crypto: drbg - Delete unused ctx from struct sdesc
  crypto: testmgr - Add missing DES weak and semi-weak key tests
  Revert "crypto: scatterwalk - Move skcipher walk and use it for memcpy_sglist"
  crypto: scatterwalk - Fix memcpy_sglist() to always succeed
  crypto: iaa - Request to add Kanchana P Sridhar to Maintainers.
  crypto: tcrypt - Remove unused poly1305 support
  crypto: ansi_cprng - Remove unused ansi_cprng algorithm
  crypto: asymmetric_keys - fix uninitialized pointers with free attribute
  KEYS: Avoid -Wflex-array-member-not-at-end warning
  crypto: ccree - Correctly handle return of sg_nents_for_len
  crypto: starfive - Correctly handle return of sg_nents_for_len
  crypto: iaa - Fix incorrect return value in save_iaa_wq()
  crypto: zstd - Remove unnecessary size_t cast
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/crypto/aead.h	87
-rw-r--r--	include/crypto/algapi.h	12
-rw-r--r--	include/crypto/df_sp80090a.h	28
-rw-r--r--	include/crypto/drbg.h	25
-rw-r--r--	include/crypto/internal/drbg.h	54
-rw-r--r--	include/crypto/internal/skcipher.h	48
-rw-r--r--	include/crypto/rng.h	11
-rw-r--r--	include/crypto/scatterwalk.h	117
-rw-r--r--	include/keys/asymmetric-type.h	2
-rw-r--r--	include/linux/rhashtable.h	70
-rw-r--r--	include/soc/fsl/caam-blob.h	26
11 files changed, 352 insertions, 128 deletions
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0e8a41638678..8e66a1fa9c78 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -159,6 +159,21 @@ struct crypto_aead {
struct crypto_tfm base;
};
+struct crypto_sync_aead {
+ struct crypto_aead base;
+};
+
+#define MAX_SYNC_AEAD_REQSIZE 384
+
+#define SYNC_AEAD_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_desc[sizeof(struct aead_request) + \
+ MAX_SYNC_AEAD_REQSIZE \
+ ] CRYPTO_MINALIGN_ATTR; \
+ struct aead_request *name = \
+ (((struct aead_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_aead_tfm((_tfm)), \
+ (void *)__##name##_desc)
+
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_aead, base);
@@ -180,11 +195,18 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
*/
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_aead_tfm(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_tfm(&tfm->base);
+}
+
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
@@ -196,6 +218,11 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+static inline void crypto_free_sync_aead(struct crypto_sync_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+
/**
* crypto_has_aead() - Search for the availability of an aead.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -238,6 +265,11 @@ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
}
+static inline unsigned int crypto_sync_aead_ivsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_ivsize(&tfm->base);
+}
+
/**
* crypto_aead_authsize() - obtain maximum authentication data size
* @tfm: cipher handle
@@ -255,6 +287,11 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_sync_aead_authsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_authsize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->maxauthsize;
@@ -265,6 +302,11 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
+static inline unsigned int crypto_sync_aead_maxauthsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_maxauthsize(&tfm->base);
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -280,6 +322,11 @@ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
+static inline unsigned int crypto_sync_aead_blocksize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
@@ -300,6 +347,21 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+static inline u32 crypto_sync_aead_get_flags(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_aead_set_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_aead_clear_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_aead_setkey() - set key for cipher
* @tfm: cipher handle
@@ -319,6 +381,12 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen);
+static inline int crypto_sync_aead_setkey(struct crypto_sync_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_aead_setkey(&tfm->base, key, keylen);
+}
+
/**
* crypto_aead_setauthsize() - set authentication data size
* @tfm: cipher handle
@@ -331,11 +399,24 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
*/
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+static inline int crypto_sync_aead_setauthsize(struct crypto_sync_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_aead_setauthsize(&tfm->base, authsize);
+}
+
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
return __crypto_aead_cast(req->base.tfm);
}
+static inline struct crypto_sync_aead *crypto_sync_aead_reqtfm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_aead, base);
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -417,6 +498,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
req->base.tfm = crypto_aead_tfm(tfm);
}
+static inline void aead_request_set_sync_tfm(struct aead_request *req,
+ struct crypto_sync_aead *tfm)
+{
+ aead_request_set_tfm(req, &tfm->base);
+}
+
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
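The SYNC_AEAD_REQUEST_ON_STACK() macro above removes the per-operation heap
allocation for synchronous AEAD users; the only field it initializes is
base.tfm, so the caller must still set the callback, AD length, and crypt
parameters. A minimal usage sketch, assuming a "gcm(aes)" transform, a
32-byte key, and a buffer with 16 spare bytes for the tag (all assumptions
for illustration, not part of this diff):

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int stack_aead_encrypt(const u8 *key, u8 *buf, unsigned int plen)
    {
            struct crypto_sync_aead *tfm;
            struct scatterlist sg;
            u8 iv[12] = {};
            int err;

            tfm = crypto_alloc_sync_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_sync_aead_setkey(tfm, key, 32) ?:
                  crypto_sync_aead_setauthsize(tfm, 16);
            if (!err) {
                    /* Request lives on the stack; no kmalloc() here. */
                    SYNC_AEAD_REQUEST_ON_STACK(req, tfm);

                    sg_init_one(&sg, buf, plen + 16); /* plaintext + tag room */
                    aead_request_set_callback(req, 0, NULL, NULL);
                    aead_request_set_ad(req, 0);
                    aead_request_set_crypt(req, &sg, &sg, plen, iv);
                    err = crypto_aead_encrypt(req);
            }
            crypto_free_sync_aead(tfm);
            return err;
    }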
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index fc4574940636..05deea9dac5e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -107,6 +107,18 @@ struct crypto_queue {
unsigned int max_qlen;
};
+struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
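The union in struct scatter_walk gives API consumers a readable but
non-assignable mapped address: the public member is const-qualified, while
core code updates the pointer through the private __addr alias. A two-line
illustration of the effect (hypothetical, for exposition only):

    void *p = walk->addr;   /* fine: read-only access for users */
    /* walk->addr = q; */   /* does not compile: addr is const */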
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
new file mode 100644
index 000000000000..6b25305fe611
--- /dev/null
+++ b/include/crypto/df_sp80090a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ */
+
+#ifndef _CRYPTO_DF80090A_H
+#define _CRYPTO_DF80090A_H
+
+#include <crypto/internal/cipher.h>
+#include <crypto/aes.h>
+
+static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
+{
+ return statelen + /* df_data */
+ blocklen + /* pad */
+ blocklen + /* iv */
+ statelen + blocklen; /* temp */
+}
+
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+ unsigned char *df_data,
+ size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen);
+
+#endif /* _CRYPTO_DF80090A_H */
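crypto_drbg_ctr_df_datalen() sums the four scratch regions the derivation
function works in. A sizing sketch, assuming a CTR DRBG built on AES-256,
where SP800-90A gives blocklen = 16 bytes and statelen = keylen + blocklen
= 48 bytes (these numbers are assumptions, not from this diff):

    #include <crypto/df_sp80090a.h>
    #include <linux/slab.h>

    /* 48 (df_data) + 16 (pad) + 16 (iv) + 48 + 16 (temp) = 144 bytes */
    unsigned int len = crypto_drbg_ctr_df_datalen(48, 16);
    u8 *df_data = kzalloc(len, GFP_KERNEL);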
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index af5ad51d3eef..2d42518cbdce 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <crypto/internal/drbg.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
@@ -54,30 +55,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
-/*
- * Concatenation Helper and string operation helper
- *
- * SP800-90A requires the concatenation of different data. To avoid copying
- * buffers around or allocate additional memory, the following data structure
- * is used to point to the original memory with its size. In addition, it
- * is used to build a linked list. The linked list defines the concatenation
- * of individual buffers. The order of memory block referenced in that
- * linked list determines the order of concatenation.
- */
-struct drbg_string {
- const unsigned char *buf;
- size_t len;
- struct list_head list;
-};
-
-static inline void drbg_string_fill(struct drbg_string *string,
- const unsigned char *buf, size_t len)
-{
- string->buf = buf;
- string->len = len;
- INIT_LIST_HEAD(&string->list);
-}
-
struct drbg_state;
typedef uint32_t drbg_flag_t;
diff --git a/include/crypto/internal/drbg.h b/include/crypto/internal/drbg.h
new file mode 100644
index 000000000000..371e52dcee6c
--- /dev/null
+++ b/include/crypto/internal/drbg.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _INTERNAL_DRBG_H
+#define _INTERNAL_DRBG_H
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @val value to be converted
+ * @buf buffer holding the converted integer -- caller must ensure that
+ * buffer size is at least 32 bit
+ */
+static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
+{
+ struct s {
+ __be32 conv;
+ };
+ struct s *conversion = (struct s *)buf;
+
+ conversion->conv = cpu_to_be32(val);
+}
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocate additional memory, the following data structure
+ * is used to point to the original memory with its size. In addition, it
+ * is used to build a linked list. The linked list defines the concatenation
+ * of individual buffers. The order of memory block referenced in that
+ * linked list determines the order of concatenation.
+ */
+struct drbg_string {
+ const unsigned char *buf;
+ size_t len;
+ struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+ const unsigned char *buf, size_t len)
+{
+ string->buf = buf;
+ string->len = len;
+ INIT_LIST_HEAD(&string->list);
+}
+
+#endif //_INTERNAL_DRBG_H
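Since struct drbg_string only points at caller memory, a concatenation such
as entropy || nonce costs two list insertions and no copies; the list order
is the concatenation order. A minimal sketch (the buffer names are
illustrative):

    LIST_HEAD(seedlist);
    struct drbg_string s1, s2;

    drbg_string_fill(&s1, entropy, entropy_len);
    drbg_string_fill(&s2, nonce, nonce_len);
    list_add_tail(&s1.list, &seedlist);
    list_add_tail(&s2.list, &seedlist);
    /* seedlist can now be passed to e.g. crypto_drbg_ctr_df(). */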
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index d5aa535263f6..0cad8e7364c8 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,7 +10,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
-#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
@@ -55,6 +54,47 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
+struct skcipher_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int nbytes;
+ unsigned int total;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
+
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -171,6 +211,7 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req,
bool atomic);
@@ -181,6 +222,11 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
struct aead_request *__restrict req,
bool atomic);
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
+
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index f8224cc390f8..d451b54b322a 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -169,12 +169,11 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* The reset function completely re-initializes the random number generator
* referenced by the cipher handle by clearing the current state. The new state
- * is initialized with the caller provided seed or automatically, depending
- * on the random number generator type (the ANSI X9.31 RNG requires
- * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
- * The seed is provided as a parameter to this function call. The provided seed
- * should have the length of the seed size defined for the random number
- * generator as defined by crypto_rng_seedsize.
+ * is initialized with the caller provided seed or automatically, depending on
+ * the random number generator type. (The SP800-90A DRBGs perform an automatic
+ * seeding.) The seed is provided as a parameter to this function call. The
+ * provided seed should have the length of the seed size defined for the random
+ * number generator as defined by crypto_rng_seedsize.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 83d14376ff2b..624fab589c2c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -11,64 +11,11 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
-#include <linux/errno.h>
+#include <crypto/algapi.h>
+
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-struct scatter_walk {
- /* Must be the first member, see struct skcipher_walk. */
- union {
- void *const addr;
-
- /* Private API field, do not touch. */
- union crypto_no_such_thing *__addr;
- };
- struct scatterlist *sg;
- unsigned int offset;
-};
-
-struct skcipher_walk {
- union {
- /* Virtual address of the source. */
- struct {
- struct {
- const void *const addr;
- } virt;
- } src;
-
- /* Private field for the API, do not use. */
- struct scatter_walk in;
- };
-
- union {
- /* Virtual address of the destination. */
- struct {
- struct {
- void *const addr;
- } virt;
- } dst;
-
- /* Private field for the API, do not use. */
- struct scatter_walk out;
- };
-
- unsigned int nbytes;
- unsigned int total;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg, int num)
@@ -227,6 +174,34 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk,
scatterwalk_advance(walk, nbytes);
}
+/*
+ * Flush the dcache of any pages that overlap the region
+ * [offset, offset + nbytes) relative to base_page.
+ *
+ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
+ * that all relevant code (including the call to sg_page() in the caller, if
+ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
+ */
+static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
+ unsigned int offset,
+ unsigned int nbytes)
+{
+ unsigned int num_pages;
+
+ base_page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ /*
+ * This is an overflow-safe version of
+ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
+ */
+ num_pages = nbytes / PAGE_SIZE;
+ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
+
+ for (unsigned int i = 0; i < num_pages; i++)
+ flush_dcache_page(base_page + i);
+}
+
/**
* scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
* @walk: the scatter_walk
@@ -240,27 +215,9 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
unsigned int nbytes)
{
scatterwalk_unmap(walk);
- /*
- * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
- * relying on flush_dcache_page() being a no-op when not implemented,
- * since otherwise the BUG_ON in sg_page() does not get optimized out.
- * This also avoids having to consider whether the loop would get
- * reliably optimized out or not.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
- struct page *base_page;
- unsigned int offset;
- int start, end, i;
-
- base_page = sg_page(walk->sg);
- offset = walk->offset;
- start = offset >> PAGE_SHIFT;
- end = start + (nbytes >> PAGE_SHIFT);
- end += (offset_in_page(offset) + offset_in_page(nbytes) +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
- for (i = start; i < end; i++)
- flush_dcache_page(base_page + i);
- }
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(sg_page(walk->sg),
+ walk->offset, nbytes);
scatterwalk_advance(walk, nbytes);
}
@@ -296,12 +253,4 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
-int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
-int skcipher_walk_done(struct skcipher_walk *walk, int res);
-
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
-
#endif /* _CRYPTO_SCATTERWALK_H */
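The split page count in __scatterwalk_flush_dcache_pages() avoids the
unsigned wraparound that the naive DIV_ROUND_UP(offset + nbytes, PAGE_SIZE)
could hit for nbytes near UINT_MAX: after offset %= PAGE_SIZE, both summands
inside the DIV_ROUND_UP are below PAGE_SIZE, so the addition cannot
overflow. A worked check with PAGE_SIZE = 4096, offset = 4000, nbytes = 8392:

    naive: DIV_ROUND_UP(4000 + 8392, 4096)                  = 4 pages
    split: 8392 / 4096 = 2;  DIV_ROUND_UP(4000 + 200, 4096) = 2;  2 + 2 = 4 pages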
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 69a13e1e5b2e..1b91c8f98688 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -49,7 +49,7 @@ enum asymmetric_payload_bits {
*/
struct asymmetric_key_id {
unsigned short len;
- unsigned char data[];
+ unsigned char data[] __counted_by(len);
};
struct asymmetric_key_ids {
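The __counted_by(len) annotation lets FORTIFY/UBSAN bounds-check accesses to
data[] against len, which requires len to be assigned before data[] is
touched. A minimal allocation sketch (the surrounding helper shape is
illustrative, not from this diff):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct asymmetric_key_id *kid;

    kid = kmalloc(struct_size(kid, data, len_bytes), GFP_KERNEL);
    if (!kid)
            return NULL;
    kid->len = len_bytes;              /* set the counter first... */
    memcpy(kid->data, src, len_bytes); /* ...then fill data[] */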
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 05a221ce79a6..08e664b21f5a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -355,12 +355,25 @@ static inline void rht_unlock(struct bucket_table *tbl,
local_irq_restore(flags);
}
-static inline struct rhash_head *__rht_ptr(
- struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+enum rht_lookup_freq {
+ RHT_LOOKUP_NORMAL,
+ RHT_LOOKUP_LIKELY,
+};
+
+static __always_inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
{
- return (struct rhash_head *)
- ((unsigned long)p & ~BIT(0) ?:
- (unsigned long)RHT_NULLS_MARKER(bkt));
+ unsigned long p_val = (unsigned long)p & ~BIT(0);
+
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
+
+ if (freq == RHT_LOOKUP_LIKELY)
+ return (struct rhash_head *)
+ (likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
+ else
+ return (struct rhash_head *)
+ (p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}
/*
@@ -370,10 +383,17 @@ static inline struct rhash_head *__rht_ptr(
* rht_ptr_exclusive() dereferences in a context where exclusive
* access is guaranteed, such as when destroying the table.
*/
+static __always_inline struct rhash_head *__rht_ptr_rcu(
+ struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
+{
+ return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
+}
+
static inline struct rhash_head *rht_ptr_rcu(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_all(*bkt), bkt);
+ return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr(
@@ -381,13 +401,15 @@ static inline struct rhash_head *rht_ptr(
struct bucket_table *tbl,
unsigned int hash)
{
- return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr_exclusive(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
@@ -588,7 +610,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
/* Internal function, do not use. */
static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
+ const struct rhashtable_params params,
+ const enum rht_lookup_freq freq)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -599,12 +622,13 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhash_head *he;
unsigned int hash;
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
- rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+ rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -643,11 +667,22 @@ static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? rht_obj(ht, he) : NULL;
}
+static __always_inline void *rhashtable_lookup_likely(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? rht_obj(ht, he) : NULL;
+}
+
/**
* rhashtable_lookup_fast - search hash table, without RCU read lock
* @ht: hash table
@@ -693,11 +728,22 @@ static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
+static __always_inline struct rhlist_head *rhltable_lookup_likely(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
/* Internal function, please use rhashtable_insert_fast() instead. This
* function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
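rhashtable_lookup_likely() and rhltable_lookup_likely() above are meant for
hot paths where a hit is the overwhelmingly common case: the likely() hint
propagates down to the bucket-pointer test, so the compiler can move the
empty-bucket path out of line. A hedged sketch (object layout and params are
illustrative):

    struct my_obj {
            u32 key;
            struct rhash_head node;
    };

    static const struct rhashtable_params my_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct my_obj, key),
            .head_offset = offsetof(struct my_obj, node),
    };

    /* Fast path; as with rhashtable_lookup(), the RCU read lock
     * must be held across the call and while obj is used. */
    rcu_read_lock();
    obj = rhashtable_lookup_likely(&ht, &key, my_params);
    if (obj)
            use_obj(obj);   /* use_obj() is a hypothetical consumer */
    rcu_read_unlock();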
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
index 937cac52f36d..922f7ec3e231 100644
--- a/include/soc/fsl/caam-blob.h
+++ b/include/soc/fsl/caam-blob.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024-2025 NXP
*/
#ifndef __CAAM_BLOB_GEN
@@ -12,11 +13,34 @@
#define CAAM_BLOB_KEYMOD_LENGTH 16
#define CAAM_BLOB_OVERHEAD (32 + 16)
#define CAAM_BLOB_MAX_LEN 4096
+#define CAAM_ENC_ALGO_CCM 0x1
+#define CAAM_ENC_ALGO_ECB 0x2
+#define CAAM_NONCE_SIZE 6
+#define CAAM_ICV_SIZE 6
+#define CAAM_CCM_OVERHEAD (CAAM_NONCE_SIZE + CAAM_ICV_SIZE)
struct caam_blob_priv;
/**
+ * struct caam_pkey_info - information for CAAM protected key
+ * @is_pkey: flag to identify, if the key is protected.
+ * @key_enc_algo: identifies the algorithm, ccm or ecb
+ * @plain_key_sz: size of plain key.
+ * @key_buf: contains key data
+ */
+struct caam_pkey_info {
+ u8 is_pkey;
+ u8 key_enc_algo;
+ u16 plain_key_sz;
+ u8 key_buf[];
+} __packed;
+
+/* sizeof struct caam_pkey_info */
+#define CAAM_PKEY_HEADER 4
+
+/**
* struct caam_blob_info - information for CAAM blobbing
+ * @pkey_info: pointer to keep protected key information
* @input: pointer to input buffer (must be DMAable)
* @input_len: length of @input buffer in bytes.
* @output: pointer to output buffer (must be DMAable)
@@ -26,6 +50,8 @@ struct caam_blob_priv;
* May not exceed %CAAM_BLOB_KEYMOD_LENGTH
*/
struct caam_blob_info {
+ struct caam_pkey_info pkey_info;
+
void *input;
size_t input_len;