Diffstat (limited to 'include/crypto')
-rw-r--r-- include/crypto/acompress.h | 386
-rw-r--r-- include/crypto/aead.h | 21
-rw-r--r-- include/crypto/aes.h | 5
-rw-r--r-- include/crypto/akcipher.h | 143
-rw-r--r-- include/crypto/algapi.h | 51
-rw-r--r-- include/crypto/authenc.h | 2
-rw-r--r-- include/crypto/blake2b.h | 31
-rw-r--r-- include/crypto/chacha.h | 100
-rw-r--r-- include/crypto/ctr.h | 50
-rw-r--r-- include/crypto/ecc_curve.h | 2
-rw-r--r-- include/crypto/ecdh.h | 1
-rw-r--r-- include/crypto/gf128mul.h | 6
-rw-r--r-- include/crypto/ghash.h | 4
-rw-r--r-- include/crypto/hash.h | 225
-rw-r--r-- include/crypto/hkdf.h | 20
-rw-r--r-- include/crypto/if_alg.h | 3
-rw-r--r-- include/crypto/internal/acompress.h | 177
-rw-r--r-- include/crypto/internal/akcipher.h | 4
-rw-r--r-- include/crypto/internal/blake2b.h | 92
-rw-r--r-- include/crypto/internal/blockhash.h | 52
-rw-r--r-- include/crypto/internal/chacha.h | 43
-rw-r--r-- include/crypto/internal/cryptouser.h | 16
-rw-r--r-- include/crypto/internal/ecc.h | 35
-rw-r--r-- include/crypto/internal/engine.h | 5
-rw-r--r-- include/crypto/internal/geniv.h | 1
-rw-r--r-- include/crypto/internal/hash.h | 137
-rw-r--r-- include/crypto/internal/poly1305.h | 28
-rw-r--r-- include/crypto/internal/rsa.h | 29
-rw-r--r-- include/crypto/internal/scompress.h | 30
-rw-r--r-- include/crypto/internal/sha2.h | 66
-rw-r--r-- include/crypto/internal/sig.h | 80
-rw-r--r-- include/crypto/internal/simd.h | 22
-rw-r--r-- include/crypto/internal/skcipher.h | 59
-rw-r--r-- include/crypto/kpp.h | 58
-rw-r--r-- include/crypto/krb5.h | 165
-rw-r--r-- include/crypto/md5.h | 3
-rw-r--r-- include/crypto/null.h | 3
-rw-r--r-- include/crypto/poly1305.h | 67
-rw-r--r-- include/crypto/polyval.h | 8
-rw-r--r-- include/crypto/public_key.h | 3
-rw-r--r-- include/crypto/rng.h | 59
-rw-r--r-- include/crypto/scatterwalk.h | 287
-rw-r--r-- include/crypto/sha1.h | 9
-rw-r--r-- include/crypto/sha1_base.h | 83
-rw-r--r-- include/crypto/sha2.h | 62
-rw-r--r-- include/crypto/sha256_base.h | 135
-rw-r--r-- include/crypto/sha3.h | 20
-rw-r--r-- include/crypto/sha512_base.h | 90
-rw-r--r-- include/crypto/sig.h | 155
-rw-r--r-- include/crypto/skcipher.h | 51
-rw-r--r-- include/crypto/sm2.h | 28
-rw-r--r-- include/crypto/sm3.h | 4
-rw-r--r-- include/crypto/sm3_base.h | 94
-rw-r--r-- include/crypto/streebog.h | 5
-rw-r--r-- include/crypto/utils.h | 2
55 files changed, 1955 insertions, 1362 deletions
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 80e243611fe2..9eacb9fa375d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -10,30 +10,93 @@
#define _CRYPTO_ACOMP_H
#include <linux/atomic.h>
+#include <linux/args.h>
+#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Set this bit if source is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002
+
+/* Set this bit if the virtual address source cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004
+
+/* Set this bit if destination is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008
+
+/* Set this bit if the virtual address destination cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010
+
+/* Private flags that should not be touched by the user. */
+#define CRYPTO_ACOMP_REQ_PRIVATE \
+ (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
+ CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)
-#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
#define CRYPTO_ACOMP_DST_MAX 131072
+#define MAX_SYNC_COMP_REQSIZE 0
+
+#define ACOMP_REQUEST_ON_STACK(name, tfm) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_request_on_stack_init( \
+ __##name##_req, (tfm))
+
+#define ACOMP_REQUEST_CLONE(name, gfp) \
+ acomp_request_clone(name, sizeof(__##name##_req), gfp)
+
+struct acomp_req;
+struct folio;
+
+struct acomp_req_chain {
+ crypto_completion_t compl;
+ void *data;
+ struct scatterlist ssg;
+ struct scatterlist dsg;
+ union {
+ const u8 *src;
+ struct folio *sfolio;
+ };
+ union {
+ u8 *dst;
+ struct folio *dfolio;
+ };
+ u32 flags;
+};
+
/**
* struct acomp_req - asynchronous (de)compression request
*
* @base: Common attributes for asynchronous crypto requests
- * @src: Source Data
- * @dst: Destination data
+ * @src: Source scatterlist
+ * @dst: Destination scatterlist
+ * @svirt: Source virtual address
+ * @dvirt: Destination virtual address
* @slen: Size of the input buffer
* @dlen: Size of the output buffer and number of bytes produced
- * @flags: Internal flags
+ * @chain: Private API code data, do not use
* @__ctx: Start of private context data
*/
struct acomp_req {
struct crypto_async_request base;
- struct scatterlist *src;
- struct scatterlist *dst;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
+ union {
+ struct scatterlist *dst;
+ u8 *dvirt;
+ };
unsigned int slen;
unsigned int dlen;
- u32 flags;
+
+ struct acomp_req_chain chain;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -43,44 +106,18 @@ struct acomp_req {
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the
- * algorithm
* @reqsize: Context size for (de)compression requests
+ * @fb: Synchronous fallback tfm
* @base: Common crypto API algorithm data structure
*/
struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
unsigned int reqsize;
struct crypto_tfm base;
};
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt: number of compress requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_cnt: number of decompress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @err_cnt: number of error for compress requests
- */
-struct crypto_istat_compress {
- atomic64_t compress_cnt;
- atomic64_t compress_tlen;
- atomic64_t decompress_cnt;
- atomic64_t decompress_tlen;
- atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
-#else
-#define COMP_ALG_COMMON_STATS
-#endif
-
#define COMP_ALG_COMMON { \
- COMP_ALG_COMMON_STATS \
- \
struct crypto_alg base; \
}
struct comp_alg_common COMP_ALG_COMMON;
@@ -157,7 +194,7 @@ static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
static inline void acomp_request_set_tfm(struct acomp_req *req,
struct crypto_acomp *tfm)
{
- req->base.tfm = crypto_acomp_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
}
static inline bool acomp_is_async(struct crypto_acomp *tfm)
@@ -192,14 +229,72 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
+static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
+}
+
+static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+}
+
/**
* acomp_request_alloc() -- allocates asynchronous (de)compression request
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL)
*
* Return: allocated handle in case of success or NULL in case of an error
*/
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+static inline struct acomp_req *acomp_request_alloc_extra_noprof(
+ struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
+{
+ struct acomp_req *req;
+ size_t len;
+
+ len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
+ if (check_add_overflow(len, extra, &len))
+ return NULL;
+
+ req = kzalloc_noprof(len, gfp);
+ if (likely(req))
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+#define acomp_request_alloc_noprof(tfm, ...) \
+ CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
+ tfm, ##__VA_ARGS__)
+#define acomp_request_alloc_noprof_0(tfm) \
+ acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
+#define acomp_request_alloc_noprof_1(tfm, gfp) \
+ acomp_request_alloc_extra_noprof(tfm, 0, gfp)
+#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))
+
+/**
+ * acomp_request_alloc_extra() -- allocate acomp request with extra memory
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @extra: amount of extra memory
+ * @gfp: gfp to pass to kzalloc
+ *
+ * Return: allocated handle in case of success or NULL in case of an error
+ */
+#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))
+
+static inline void *acomp_request_extra(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ size_t len;
+
+ len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
+ return (void *)((char *)req + len);
+}
+
+static inline bool acomp_req_on_stack(struct acomp_req *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
/**
* acomp_request_free() -- zeroize and free asynchronous (de)compression
@@ -208,7 +303,12 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
*
* @req: request to free
*/
-void acomp_request_free(struct acomp_req *req);
+static inline void acomp_request_free(struct acomp_req *req)
+{
+ if (!req || acomp_req_on_stack(req))
+ return;
+ kfree_sensitive(req);
+}
/**
* acomp_request_set_callback() -- Sets an asynchronous callback
@@ -226,10 +326,9 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
- req->base.complete = cmpl;
- req->base.data = data;
- req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
- req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
+ flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flgs, cmpl, data);
}
/**
@@ -256,59 +355,183 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
- req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
- if (!req->dst)
- req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_VIRT |
+ CRYPTO_ACOMP_REQ_DST_NONDMA);
}
-static inline struct crypto_istat_compress *comp_get_stat(
- struct comp_alg_common *alg)
+/**
+ * acomp_request_set_src_sg() -- Sets source scatterlist
+ *
+ * Sets source scatterlist required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @src: pointer to input buffer scatterlist
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_sg(struct acomp_req *req,
+ struct scatterlist *src,
+ unsigned int slen)
{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
+ req->src = src;
+ req->slen = slen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
}
-static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
+/**
+ * acomp_request_set_src_dma() -- Sets DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req: asynchronous compress request
+ * @src: virtual address pointer to input buffer
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_dma(struct acomp_req *req,
+ const u8 *src, unsigned int slen)
{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
+ req->svirt = src;
+ req->slen = slen;
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&comp_get_stat(alg)->err_cnt);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
- return err;
+/**
+ * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address cannot be used for DMA.
+ *
+ * @req: asynchronous compress request
+ * @src: virtual address pointer to input buffer
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_nondma(struct acomp_req *req,
+ const u8 *src,
+ unsigned int slen)
+{
+ req->svirt = src;
+ req->slen = slen;
+
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}
/**
- * crypto_acomp_compress() -- Invoke asynchronous compress operation
+ * acomp_request_set_src_folio() -- Sets source folio
*
- * Function invokes the asynchronous compress operation
+ * Sets source folio required by an acomp operation.
*
* @req: asynchronous compress request
+ * @folio: pointer to input folio
+ * @off: input folio offset
+ * @len: size of the input buffer
+ */
+static inline void acomp_request_set_src_folio(struct acomp_req *req,
+ struct folio *folio, size_t off,
+ unsigned int len)
+{
+ sg_init_table(&req->chain.ssg, 1);
+ sg_set_folio(&req->chain.ssg, folio, len, off);
+ acomp_request_set_src_sg(req, &req->chain.ssg, len);
+}
+
+/**
+ * acomp_request_set_dst_sg() -- Sets destination scatterlist
*
- * Return: zero on success; error code in case of error
+ * Sets destination scatterlist required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @dst: pointer to output buffer scatterlist
+ * @dlen: size of the output buffer
*/
-static inline int crypto_acomp_compress(struct acomp_req *req)
+static inline void acomp_request_set_dst_sg(struct acomp_req *req,
+ struct scatterlist *dst,
+ unsigned int dlen)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct comp_alg_common *alg;
+ req->dst = dst;
+ req->dlen = dlen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req: asynchronous compress request
+ * @dst: virtual address pointer to output buffer
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_dma(struct acomp_req *req,
+ u8 *dst, unsigned int dlen)
+{
+ req->dvirt = dst;
+ req->dlen = dlen;
- alg = crypto_comp_alg_common(tfm);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_compress *istat = comp_get_stat(alg);
+/**
+ * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address cannot be used for DMA.
+ *
+ * @req: asynchronous compress request
+ * @dst: virtual address pointer to output buffer
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
+ u8 *dst, unsigned int dlen)
+{
+ req->dvirt = dst;
+ req->dlen = dlen;
- atomic64_inc(&istat->compress_cnt);
- atomic64_add(req->slen, &istat->compress_tlen);
- }
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
- return crypto_comp_errstat(alg, tfm->compress(req));
+/**
+ * acomp_request_set_dst_folio() -- Sets destination folio
+ *
+ * Sets destination folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to output folio
+ * @off: output folio offset
+ * @len: size of the output buffer
+ */
+static inline void acomp_request_set_dst_folio(struct acomp_req *req,
+ struct folio *folio, size_t off,
+ unsigned int len)
+{
+ sg_init_table(&req->chain.dsg, 1);
+ sg_set_folio(&req->chain.dsg, folio, len, off);
+ acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}
/**
+ * crypto_acomp_compress() -- Invoke asynchronous compress operation
+ *
+ * Function invokes the asynchronous compress operation
+ *
+ * @req: asynchronous compress request
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_acomp_compress(struct acomp_req *req);
+
+/**
* crypto_acomp_decompress() -- Invoke asynchronous decompress operation
*
* Function invokes the asynchronous decompress operation
@@ -317,21 +540,18 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_acomp_decompress(struct acomp_req *req)
-{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct comp_alg_common *alg;
+int crypto_acomp_decompress(struct acomp_req *req);
- alg = crypto_comp_alg_common(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_compress *istat = comp_get_stat(alg);
-
- atomic64_inc(&istat->decompress_cnt);
- atomic64_add(req->slen, &istat->decompress_tlen);
- }
+static inline struct acomp_req *acomp_request_on_stack_init(
+ char *buf, struct crypto_acomp *tfm)
+{
+ struct acomp_req *req = (void *)buf;
- return crypto_comp_errstat(alg, tfm->decompress(req));
+ crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
+ return req;
}
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp);
+
#endif
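Usage sketch (illustrative, not part of the patch): one-shot compression with the reworked acomp API above. It assumes @tfm is a synchronous implementation (e.g. allocated with crypto_alloc_acomp(name, 0, CRYPTO_ALG_ASYNC)), since an on-stack request has no room for an async driver context; the zero callback flags are an illustrative choice.

static int example_acomp_compress(struct crypto_acomp *tfm,
                                  const u8 *src, unsigned int slen,
                                  u8 *dst, unsigned int dlen)
{
        ACOMP_REQUEST_ON_STACK(req, tfm);

        acomp_request_set_callback(req, 0, NULL, NULL);
        /* Plain kernel virtual addresses that may not be DMA-capable. */
        acomp_request_set_src_nondma(req, src, slen);
        acomp_request_set_dst_nondma(req, dst, dlen);

        /* On success, req->dlen is updated to the bytes produced. */
        return crypto_acomp_compress(req);
}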
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 51382befbe37..0e8a41638678 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -101,22 +101,6 @@ struct aead_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for AEAD requests
- */
-struct crypto_istat_aead {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
@@ -135,7 +119,6 @@ struct crypto_istat_aead {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
- * @stat: statistics for AEAD algorithm
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -162,10 +145,6 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_aead stat;
-#endif
-
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 2090729701ab..9339da7c20a8 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -87,4 +87,9 @@ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+
#endif
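Usage sketch (illustrative, not part of the patch): calling the new AES-CFB library routines above. aes_expandkey() and the AES_* constants are the existing library interface in this header; the 128-bit key size, in-place operation, and memzero_explicit() cleanup are illustrative choices.

static int example_aescfb(const u8 key[AES_KEYSIZE_128],
                          const u8 iv[AES_BLOCK_SIZE],
                          u8 *buf, int len)
{
        struct crypto_aes_ctx ctx;
        int err;

        err = aes_expandkey(&ctx, key, AES_KEYSIZE_128);
        if (err)
                return err;

        /* Encrypt in place; decryption would use aescfb_decrypt(). */
        aescfb_encrypt(&ctx, buf, buf, len, iv);

        memzero_explicit(&ctx, sizeof(ctx));    /* wipe the key schedule */
        return 0;
}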
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 31c111bebb68..cdf7da74bf2f 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -12,24 +12,19 @@
#include <linux/crypto.h>
/**
- * struct akcipher_request - public key request
+ * struct akcipher_request - public key cipher request
*
* @base: Common attributes for async crypto requests
* @src: Source data
- * For verify op this is signature + digest, in that case
- * total size of @src is @src_len + @dst_len.
- * @dst: Destination data (Should be NULL for verify op)
+ * @dst: Destination data
* @src_len: Size of the input buffer
- * For verify op it's size of signature part of @src, this part
- * is supposed to be operated by cipher.
- * @dst_len: Size of @dst buffer (for all ops except verify).
+ * @dst_len: Size of @dst buffer
* It needs to be at least as big as the expected result
* depending on the operation.
* After operation it will be updated with the actual size of the
* result.
* In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
- * For verify op this is size of digest part in @src.
* @__ctx: Start of private context data
*/
struct akcipher_request {
@@ -54,36 +49,9 @@ struct crypto_akcipher {
struct crypto_tfm base;
};
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @verify_cnt: number of verify operation
- * @sign_cnt: number of sign requests
- * @err_cnt: number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t verify_cnt;
- atomic64_t sign_cnt;
- atomic64_t err_cnt;
-};
-
/**
- * struct akcipher_alg - generic public key algorithm
+ * struct akcipher_alg - generic public key cipher algorithm
*
- * @sign: Function performs a sign operation as defined by public key
- * algorithm. In case of error, where the dst_len was insufficient,
- * the req->dst_len will be updated to the size required for the
- * operation
- * @verify: Function performs a complete verify operation as defined by
- * public key algorithm, returning verification status. Requires
- * digest value as input parameter.
* @encrypt: Function performs an encrypt operation as defined by public key
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
@@ -110,13 +78,10 @@ struct crypto_istat_akcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @stat: Statistics for akcipher algorithm
*
* @base: Common crypto API algorithm data structure
*/
struct akcipher_alg {
- int (*sign)(struct akcipher_request *req);
- int (*verify)(struct akcipher_request *req);
int (*encrypt)(struct akcipher_request *req);
int (*decrypt)(struct akcipher_request *req);
int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key,
@@ -127,17 +92,13 @@ struct akcipher_alg {
int (*init)(struct crypto_akcipher *tfm);
void (*exit)(struct crypto_akcipher *tfm);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_akcipher stat;
-#endif
-
struct crypto_alg base;
};
/**
- * DOC: Generic Public Key API
+ * DOC: Generic Public Key Cipher API
*
- * The Public Key API is used with the algorithms of type
+ * The Public Key Cipher API is used with the algorithms of type
* CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
*/
@@ -268,10 +229,9 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
*
* @req: public key request
* @src: ptr to input scatter list
- * @dst: ptr to output scatter list or NULL for verify op
+ * @dst: ptr to output scatter list
* @src_len: size of the src input scatter list to be processed
- * @dst_len: size of the dst output scatter list or size of signature
- * portion in @src for verify op
+ * @dst_len: size of the dst output scatter list
*/
static inline void akcipher_request_set_crypt(struct akcipher_request *req,
struct scatterlist *src,
@@ -302,27 +262,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
-static inline struct crypto_istat_akcipher *akcipher_get_stat(
- struct akcipher_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -336,16 +275,8 @@ static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
-
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(req->src_len, &istat->encrypt_tlen);
- }
- return crypto_akcipher_errstat(alg, alg->encrypt(req));
+ return crypto_akcipher_alg(tfm)->encrypt(req);
}
/**
@@ -361,16 +292,8 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
-
- atomic64_inc(&istat->decrypt_cnt);
- atomic64_add(req->src_len, &istat->decrypt_tlen);
- }
- return crypto_akcipher_errstat(alg, alg->decrypt(req));
+ return crypto_akcipher_alg(tfm)->decrypt(req);
}
/**
@@ -410,52 +333,6 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
void *dst, unsigned int dlen);
/**
- * crypto_akcipher_sign() - Invoke public key sign operation
- *
- * Function invokes the specific public key sign operation for a given
- * public key algorithm
- *
- * @req: asymmetric key request
- *
- * Return: zero on success; error code in case of error
- */
-static inline int crypto_akcipher_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
-
- return crypto_akcipher_errstat(alg, alg->sign(req));
-}
-
-/**
- * crypto_akcipher_verify() - Invoke public key signature verification
- *
- * Function invokes the specific public key signature verification operation
- * for a given public key algorithm.
- *
- * @req: asymmetric key request
- *
- * Note: req->dst should be NULL, req->src should point to SG of size
- * (req->src_size + req->dst_size), containing signature (of req->src_size
- * length) with appended digest (of req->dst_size length).
- *
- * Return: zero on verification success; error code in case of error.
- */
-static inline int crypto_akcipher_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
-
- return crypto_akcipher_errstat(alg, alg->verify(req));
-}
-
-/**
* crypto_akcipher_set_pub_key() - Invoke set public key operation
*
* Function invokes the algorithm specific set key function, which knows
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 7a4a71af653f..188eface0a11 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -11,6 +11,7 @@
#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
+#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -53,23 +54,7 @@ struct rtattr;
struct scatterlist;
struct seq_file;
struct sk_buff;
-
-struct crypto_type {
- unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
- unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init_tfm)(struct crypto_tfm *tfm);
- void (*show)(struct seq_file *m, struct crypto_alg *alg);
- int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
- void (*free)(struct crypto_instance *inst);
-#ifdef CONFIG_CRYPTO_STATS
- int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
-#endif
-
- unsigned int type;
- unsigned int maskclear;
- unsigned int maskset;
- unsigned int tfmsize;
-};
+union crypto_no_such_thing;
struct crypto_instance {
struct crypto_alg alg;
@@ -83,16 +68,17 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
- struct work_struct free_work;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_template {
struct list_head list;
struct hlist_head instances;
+ struct hlist_head dead;
struct module *module;
+ struct work_struct free_work;
+
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -121,11 +107,6 @@ struct crypto_queue {
unsigned int max_qlen;
};
-struct scatter_walk {
- struct scatterlist *sg;
- unsigned int offset;
-};
-
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
@@ -165,8 +146,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+ CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+ inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+ __crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+ __crypto_inst_setname(inst, name, driver, alg)
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -274,4 +263,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
+static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT;
+}
+
+static inline u32 crypto_request_flags(struct crypto_async_request *req)
+{
+ return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
+}
+
#endif /* _CRYPTO_ALGAPI_H */
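Usage sketch (illustrative, not part of the patch): the variadic crypto_inst_setname() above dispatches on the number of trailing arguments, so template code only spells out a driver name when it differs from the algorithm name. The "xts" name is illustrative.

static int example_setname(struct crypto_instance *inst,
                           struct crypto_alg *alg)
{
        /* One trailing argument: the driver name is derived from the
         * algorithm name, i.e. this expands to
         * __crypto_inst_setname(inst, "xts", "xts", alg). Passing an
         * explicit driver string before @alg selects the other form. */
        return crypto_inst_setname(inst, "xts", alg);
}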
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index 5f92a986083c..15a9caa2354a 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -28,5 +28,7 @@ struct crypto_authenc_keys {
int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
unsigned int keylen);
+int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen);
#endif /* _CRYPTO_AUTHENC_H */
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
index 0c0176285349..dd7694477e50 100644
--- a/include/crypto/blake2b.h
+++ b/include/crypto/blake2b.h
@@ -7,10 +7,20 @@
#include <linux/types.h>
#include <linux/string.h>
+struct blake2b_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
+ u64 h[8];
+ u64 t[2];
+ /* The true state ends here. The rest is temporary storage. */
+ u64 f[2];
+};
+
enum blake2b_lengths {
BLAKE2B_BLOCK_SIZE = 128,
BLAKE2B_HASH_SIZE = 64,
BLAKE2B_KEY_SIZE = 64,
+ BLAKE2B_STATE_SIZE = offsetof(struct blake2b_state, f),
+ BLAKE2B_DESC_SIZE = sizeof(struct blake2b_state),
BLAKE2B_160_HASH_SIZE = 20,
BLAKE2B_256_HASH_SIZE = 32,
@@ -18,16 +28,6 @@ enum blake2b_lengths {
BLAKE2B_512_HASH_SIZE = 64,
};
-struct blake2b_state {
- /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
- u64 h[8];
- u64 t[2];
- u64 f[2];
- u8 buf[BLAKE2B_BLOCK_SIZE];
- unsigned int buflen;
- unsigned int outlen;
-};
-
enum blake2b_iv {
BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL,
BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL,
@@ -40,7 +40,7 @@ enum blake2b_iv {
};
static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
- const void *key, size_t keylen)
+ size_t keylen)
{
state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
state->h[1] = BLAKE2B_IV1;
@@ -52,15 +52,6 @@ static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
state->h[7] = BLAKE2B_IV7;
state->t[0] = 0;
state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
- if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2B_BLOCK_SIZE;
- }
}
#endif /* _CRYPTO_BLAKE2B_H */
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index b3ea73b81944..91f6b4cf561c 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -15,7 +15,8 @@
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/string.h>
#include <linux/types.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
@@ -25,21 +26,32 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#define CHACHA_KEY_WORDS 8
+#define CHACHA_STATE_WORDS 16
+#define HCHACHA_OUT_WORDS 8
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
-static inline void chacha20_block(u32 *state, u8 *stream)
+struct chacha_state {
+ u32 x[CHACHA_STATE_WORDS];
+};
+
+void chacha_block_generic(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE], int nrounds);
+static inline void chacha20_block(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE])
{
- chacha_block_generic(state, stream, 20);
+ chacha_block_generic(state, out, 20);
}
-void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
-void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+void hchacha_block_generic(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
-static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+static inline void hchacha_block(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
hchacha_block_arch(state, out, nrounds);
@@ -54,46 +66,40 @@ enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_TE_K = 0x6b206574U
};
-static inline void chacha_init_consts(u32 *state)
+static inline void chacha_init_consts(struct chacha_state *state)
{
- state[0] = CHACHA_CONSTANT_EXPA;
- state[1] = CHACHA_CONSTANT_ND_3;
- state[2] = CHACHA_CONSTANT_2_BY;
- state[3] = CHACHA_CONSTANT_TE_K;
+ state->x[0] = CHACHA_CONSTANT_EXPA;
+ state->x[1] = CHACHA_CONSTANT_ND_3;
+ state->x[2] = CHACHA_CONSTANT_2_BY;
+ state->x[3] = CHACHA_CONSTANT_TE_K;
}
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init(struct chacha_state *state,
+ const u32 key[CHACHA_KEY_WORDS],
+ const u8 iv[CHACHA_IV_SIZE])
{
chacha_init_consts(state);
- state[4] = key[0];
- state[5] = key[1];
- state[6] = key[2];
- state[7] = key[3];
- state[8] = key[4];
- state[9] = key[5];
- state[10] = key[6];
- state[11] = key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
-}
-
-static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- chacha_init_arch(state, key, iv);
- else
- chacha_init_generic(state, key, iv);
+ state->x[4] = key[0];
+ state->x[5] = key[1];
+ state->x[6] = key[2];
+ state->x[7] = key[3];
+ state->x[8] = key[4];
+ state->x[9] = key[5];
+ state->x[10] = key[6];
+ state->x[11] = key[7];
+ state->x[12] = get_unaligned_le32(iv + 0);
+ state->x[13] = get_unaligned_le32(iv + 4);
+ state->x[14] = get_unaligned_le32(iv + 8);
+ state->x[15] = get_unaligned_le32(iv + 12);
}
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
-void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
-static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
+static inline void chacha_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
@@ -102,10 +108,24 @@ static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
chacha_crypt_generic(state, dst, src, bytes, nrounds);
}
-static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
+static inline void chacha20_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src, unsigned int bytes)
{
chacha_crypt(state, dst, src, bytes, 20);
}
+static inline void chacha_zeroize_state(struct chacha_state *state)
+{
+ memzero_explicit(state, sizeof(*state));
+}
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)
+bool chacha_is_arch_optimized(void);
+#else
+static inline bool chacha_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+
#endif /* _CRYPTO_CHACHA_H */
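Usage sketch (illustrative, not part of the patch): one-shot ChaCha20 encryption with the struct-wrapped state introduced above. Key and IV contents are the caller's; the 20-round variant is chosen for illustration.

static void example_chacha20(const u32 key[CHACHA_KEY_WORDS],
                             const u8 iv[CHACHA_IV_SIZE],
                             u8 *dst, const u8 *src, unsigned int len)
{
        struct chacha_state state;

        chacha_init(&state, key, iv);           /* constants, key, IV */
        chacha20_crypt(&state, dst, src, len);  /* encrypt == decrypt */
        chacha_zeroize_state(&state);           /* wipe key material */
}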
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index a1c66d1001af..06984a26c8cf 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -8,58 +8,8 @@
#ifndef _CRYPTO_CTR_H
#define _CRYPTO_CTR_H
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
-static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int blocksize = crypto_skcipher_chunksize(tfm);
- u8 buf[MAX_CIPHER_BLOCKSIZE];
- struct skcipher_walk walk;
- int err;
-
- /* avoid integer division due to variable blocksize parameter */
- if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- u8 *dst = walk.dst.virt.addr;
- u8 *src = walk.src.virt.addr;
- int nbytes = walk.nbytes;
- int tail = 0;
-
- if (nbytes < walk.total) {
- tail = walk.nbytes & (blocksize - 1);
- nbytes -= tail;
- }
-
- do {
- int bsize = min(nbytes, blocksize);
-
- fn(tfm, walk.iv, buf);
-
- crypto_xor_cpy(dst, src, buf, bsize);
- crypto_inc(walk.iv, blocksize);
-
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- } while (nbytes > 0);
-
- err = skcipher_walk_done(&walk, tail);
- }
- return err;
-}
-
#endif /* _CRYPTO_CTR_H */
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
index 70964781eb68..7d90c5e82266 100644
--- a/include/crypto/ecc_curve.h
+++ b/include/crypto/ecc_curve.h
@@ -23,6 +23,7 @@ struct ecc_point {
* struct ecc_curve - definition of elliptic curve
*
* @name: Short name of the curve.
+ * @nbits: The number of bits of a curve.
* @g: Generator point of the curve.
* @p: Prime number, if Barrett's reduction is used for this curve
* pre-calculated value 'mu' is appended to the @p after ndigits.
@@ -34,6 +35,7 @@ struct ecc_point {
*/
struct ecc_curve {
char *name;
+ u32 nbits;
struct ecc_point g;
u64 *p;
u64 *n;
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a9f98078d29c..9784ecdd2fb4 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -26,6 +26,7 @@
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
#define ECC_CURVE_NIST_P384 0x0003
+#define ECC_CURVE_NIST_P521 0x0004
/**
* struct ecdh - define an ECDH private key
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 81330c6446f6..b0853f7cada0 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -158,12 +158,10 @@
64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
*/
-/* A slow generic version of gf_mul, implemented for lle and bbe
+/* A slow generic version of gf_mul, implemented for lle
* It multiplies a and b and puts the result in a */
void gf128mul_lle(be128 *a, const be128 *b);
-void gf128mul_bbe(be128 *a, const be128 *b);
-
/*
* The following functions multiply a field element by x in
* the polynomial field representation. They use 64-bit word operations
@@ -224,9 +222,7 @@ struct gf128mul_4k {
};
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index f832c9f2aca3..043d938e9a2c 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -7,18 +7,18 @@
#define __CRYPTO_GHASH_H__
#include <linux/types.h>
-#include <crypto/gf128mul.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
+struct gf128mul_4k;
+
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 5d61f576cfc8..6f6b9de12cd3 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,10 +8,17 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
-#include <linux/atomic.h>
#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
+/* Set this bit for virtual address instead of SG list. */
+#define CRYPTO_AHASH_REQ_VIRT 0x00000001
+
+#define CRYPTO_AHASH_REQ_PRIVATE \
+ CRYPTO_AHASH_REQ_VIRT
+
struct crypto_ahash;
/**
@@ -24,26 +31,7 @@ struct crypto_ahash;
*/
/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt: number of hash requests
- * @hash_tlen: total data size hashed
- * @err_cnt: number of error for hash requests
- */
-struct crypto_istat_hash {
- atomic64_t hash_cnt;
- atomic64_t hash_tlen;
- atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
-#else
-#define HASH_ALG_COMMON_STAT
-#endif
-
-/*
* struct hash_alg_common - define properties of message digest
- * @stat: Statistics for hash algorithm.
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
* store the resulting hash into it. For various predefined sizes,
@@ -60,8 +48,6 @@ struct crypto_istat_hash {
* information.
*/
#define HASH_ALG_COMMON { \
- HASH_ALG_COMMON_STAT \
- \
unsigned int digestsize; \
unsigned int statesize; \
\
@@ -73,11 +59,15 @@ struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
- struct scatterlist *src;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
u8 *result;
- /* This field may only be used by the ahash API code. */
- void *priv;
+ struct scatterlist sg_head[2];
+ crypto_completion_t saved_complete;
+ void *saved_data;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -99,6 +89,8 @@ struct ahash_request {
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point. Driver must not use
* req->result.
+ * For block-only algorithms, @update must return the number
+ * of bytes to store in the API partial block buffer.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
@@ -141,6 +133,10 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @export_core: Export partial state without partial block. Only defined
+ * for algorithms that are not block-only.
+ * @import_core: Import partial state without partial block. Only defined
+ * for algorithms that are not block-only.
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
@@ -163,6 +159,8 @@ struct ahash_alg {
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
+ int (*export_core)(struct ahash_request *req, void *out);
+ int (*import_core)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
@@ -179,17 +177,31 @@ struct shash_desc {
#define HASH_MAX_DIGESTSIZE 64
+/* Worst case is sha3-224. */
+#define HASH_MAX_STATESIZE (200 + 144 + 1)
+
/*
- * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
- * containing a 'struct sha3_state'.
+ * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc'
+ * containing a 'struct s390_sha_ctx'.
*/
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
+#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \
+ HASH_MAX_DESCSIZE)
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+#define HASH_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = \
+ ahash_request_on_stack_init(__##name##_req, (_tfm))
+
+#define HASH_REQUEST_CLONE(name, gfp) \
+ hash_request_clone(name, sizeof(__##name##_req), gfp)
+
/**
* struct shash_alg - synchronous message digest definition
* @init: see struct ahash_alg
@@ -199,6 +211,8 @@ struct shash_desc {
* @digest: see struct ahash_alg
* @export: see struct ahash_alg
* @import: see struct ahash_alg
+ * @export_core: see struct ahash_alg
+ * @import_core: see struct ahash_alg
* @setkey: see struct ahash_alg
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
@@ -229,6 +243,8 @@ struct shash_alg {
unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
+ int (*export_core)(struct shash_desc *desc, void *out);
+ int (*import_core)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
@@ -243,7 +259,6 @@ struct shash_alg {
};
};
#undef HASH_ALG_COMMON
-#undef HASH_ALG_COMMON_STAT
struct crypto_ahash {
bool using_shash; /* Underlying algorithm is shash, not ahash */
@@ -253,7 +268,6 @@ struct crypto_ahash {
};
struct crypto_shash {
- unsigned int descsize;
struct crypto_tfm base;
};
@@ -267,6 +281,11 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
+static inline bool ahash_req_on_stack(struct ahash_request *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
@@ -474,7 +493,11 @@ int crypto_ahash_finup(struct ahash_request *req);
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
-int crypto_ahash_final(struct ahash_request *req);
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return crypto_ahash_finup(req);
+}
/**
* crypto_ahash_digest() - calculate message digest for a buffer
@@ -563,7 +586,7 @@ int crypto_ahash_update(struct ahash_request *req);
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
- req->base.tfm = crypto_ahash_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}
/**
@@ -578,28 +601,26 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
-static inline struct ahash_request *ahash_request_alloc(
+static inline struct ahash_request *ahash_request_alloc_noprof(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
- req = kmalloc(sizeof(struct ahash_request) +
- crypto_ahash_reqsize(tfm), gfp);
+ req = kmalloc_noprof(sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
+#define ahash_request_alloc(...) alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))
/**
* ahash_request_free() - zeroize and free the request data structure
* @req: request data structure cipher handle to be freed
*/
-static inline void ahash_request_free(struct ahash_request *req)
-{
- kfree_sensitive(req);
-}
+void ahash_request_free(struct ahash_request *req);
static inline void ahash_request_zero(struct ahash_request *req)
{
@@ -643,9 +664,9 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
crypto_completion_t compl,
void *data)
{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
+ flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
@@ -668,6 +689,30 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
req->src = src;
req->nbytes = nbytes;
req->result = result;
+ req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
+}
+
+/**
+ * ahash_request_set_virt() - set virtual address data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source virtual address
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source virtual address
+ *
+ * By using this call, the caller references the source virtual address.
+ * The source virtual address points to the data the message digest is to
+ * be calculated for.
+ */
+static inline void ahash_request_set_virt(struct ahash_request *req,
+ const u8 *src, u8 *result,
+ unsigned int nbytes)
+{
+ req->svirt = src;
+ req->nbytes = nbytes;
+ req->result = result;
+ req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
/**
@@ -805,7 +850,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
*/
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
- return tfm->descsize;
+ return crypto_shash_alg(tfm)->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -823,7 +868,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -840,7 +885,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -860,12 +905,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out);
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
@@ -875,7 +923,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_descsize).
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
int crypto_shash_export(struct shash_desc *desc, void *out);
@@ -889,7 +937,7 @@ int crypto_shash_export(struct shash_desc *desc, void *out);
* the input buffer. That buffer should have been generated with the
* crypto_ahash_export function.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_shash_import(struct shash_desc *desc, const void *in);
@@ -902,19 +950,29 @@ int crypto_shash_import(struct shash_desc *desc, const void *in);
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
-static inline int crypto_shash_init(struct shash_desc *desc)
-{
- struct crypto_shash *tfm = desc->tfm;
+int crypto_shash_init(struct shash_desc *desc);
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->init(desc);
-}
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
/**
* crypto_shash_update() - add data to message digest for processing
@@ -924,12 +982,15 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_shash_finup(desc, data, len, NULL);
+}
/**
* crypto_shash_final() - calculate message digest
@@ -941,29 +1002,14 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
-int crypto_shash_final(struct shash_desc *desc, u8 *out);
-
-/**
- * crypto_shash_finup() - calculate message digest of buffer
- * @desc: see crypto_shash_final()
- * @data: see crypto_shash_update()
- * @len: see crypto_shash_update()
- * @out: see crypto_shash_final()
- *
- * This function is a "short-hand" for the function calls of
- * crypto_shash_update and crypto_shash_final. The parameters have the same
- * meaning as discussed for those separate functions.
- *
- * Context: Any context.
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
+{
+ return crypto_shash_finup(desc, NULL, 0, out);
+}
static inline void shash_desc_zero(struct shash_desc *desc)
{
@@ -971,4 +1017,25 @@ static inline void shash_desc_zero(struct shash_desc *desc)
sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}
+static inline bool ahash_is_async(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_is_async(&tfm->base);
+}
+
+static inline struct ahash_request *ahash_request_on_stack_init(
+ char *buf, struct crypto_ahash *tfm)
+{
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
+ return req;
+}
+
+static inline struct ahash_request *ahash_request_clone(
+ struct ahash_request *req, size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct ahash_request, base);
+}
+
#endif /* _CRYPTO_HASH_H */
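
A usage sketch of the reworked shash flow above, in which update() and final() are now thin wrappers around finup(). The "sha256" algorithm name and the error handling are illustrative assumptions, not part of this header:

	#include <crypto/hash.h>

	static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			/* update() and final() both funnel into finup() now. */
			err = crypto_shash_init(desc) ?:
			      crypto_shash_finup(desc, data, len, out);
			shash_desc_zero(desc);
		}

		crypto_free_shash(tfm);
		return err;
	}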
diff --git a/include/crypto/hkdf.h b/include/crypto/hkdf.h
new file mode 100644
index 000000000000..6a9678f508f5
--- /dev/null
+++ b/include/crypto/hkdf.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HKDF: HMAC-based Key Derivation Function (HKDF), RFC 5869
+ *
+ * Extracted from fs/crypto/hkdf.c, which has
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _CRYPTO_HKDF_H
+#define _CRYPTO_HKDF_H
+
+#include <crypto/hash.h>
+
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk);
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen);
+#endif
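
A sketch of the intended extract-then-expand call sequence. The "hmac(sha512)" instantiation and the re-keying of the tfm with the PRK before hkdf_expand() mirror how fs/crypto used these helpers; treat the details as assumptions:

	#include <crypto/hkdf.h>
	#include <crypto/sha2.h>

	static int example_hkdf(const u8 *ikm, unsigned int ikmlen,
				const u8 *salt, unsigned int saltlen,
				const u8 *info, unsigned int infolen,
				u8 *okm, unsigned int okmlen)
	{
		struct crypto_shash *hmac_tfm;
		u8 prk[SHA512_DIGEST_SIZE];
		int err;

		hmac_tfm = crypto_alloc_shash("hmac(sha512)", 0, 0);
		if (IS_ERR(hmac_tfm))
			return PTR_ERR(hmac_tfm);

		/* PRK = HMAC-Hash(salt, IKM); the tfm is then keyed with PRK. */
		err = hkdf_extract(hmac_tfm, ikm, ikmlen, salt, saltlen, prk);
		if (!err)
			err = crypto_shash_setkey(hmac_tfm, prk, sizeof(prk));
		if (!err)
			err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);

		memzero_explicit(prk, sizeof(prk));
		crypto_free_shash(hmac_tfm);
		return err;
	}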
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 78ecaf5db04c..f7b3b93f3a49 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -166,7 +166,8 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
+int af_alg_accept(struct sock *sk, struct socket *newsock,
+ struct proto_accept_arg *arg);
void af_alg_free_sg(struct af_alg_sgl *sgl);
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 4ac46bafba9d..ffffd88bbbad 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,13 +11,23 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
+
+#define ACOMP_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_fbreq_on_stack_init( \
+ __##name##_req, (req))
/**
* struct acomp_alg - asynchronous compression algorithm
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the algorithm
* @init: Initialize the cryptographic transformation object.
* This function is used to initialize the cryptographic
* transformation object. This function is called only once at
@@ -30,26 +40,70 @@
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Context size for (de)compression requests
- * @stat: Statistics for compress algorithm
* @base: Common crypto API algorithm data structure
 * @calg: Common algorithm data structure shared with scomp
*/
struct acomp_alg {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
int (*init)(struct crypto_acomp *tfm);
void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
-
union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ union {
+ void (*free_ctx)(void *);
+ void (*cfree_ctx)(const void *);
+ };
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -69,21 +123,6 @@ static inline void acomp_request_complete(struct acomp_req *req,
crypto_request_complete(&req->base, err);
}
-static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
-{
- struct acomp_req *req;
-
- req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
- if (likely(req))
- acomp_request_set_tfm(req, tfm);
- return req;
-}
-
-static inline void __acomp_request_free(struct acomp_req *req)
-{
- kfree_sensitive(req);
-}
-
/**
* crypto_register_acomp() -- Register asynchronous compression algorithm
*
@@ -109,4 +148,100 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+static inline bool acomp_request_issg(struct acomp_req *req)
+{
+ return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT));
+}
+
+static inline bool acomp_request_src_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+static inline bool acomp_request_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT);
+}
+
+static inline bool acomp_request_src_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
+}
+
+static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
+}
+
+static inline bool acomp_request_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_NONDMA);
+}
+
+static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
+
+static inline u32 acomp_request_flags(struct acomp_req *req)
+{
+ return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
+}
+
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
+}
+
+static inline struct acomp_req *acomp_fbreq_on_stack_init(
+ char *buf, struct acomp_req *old)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_acomp_tfm(crypto_acomp_fb(tfm)));
+ acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ req->src = old->src;
+ req->dst = old->dst;
+ req->slen = old->slen;
+ req->dlen = old->dlen;
+
+ return req;
+}
+
#endif
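
A sketch of how a driver is expected to bounce a request it cannot service in hardware to the software fallback via the new on-stack helper (the surrounding driver code is hypothetical):

	static int example_compress(struct acomp_req *req)
	{
		ACOMP_FBREQ_ON_STACK(fbreq, req);
		int err;

		/* fbreq inherits src/dst, lengths and the private flags of req. */
		err = crypto_acomp_compress(fbreq);
		req->dlen = fbreq->dlen;
		return err;
	}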
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index a0fba4b2eccf..14ee62bc52b6 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -124,7 +124,7 @@ static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
/**
* crypto_register_akcipher() -- Register public key algorithm
*
- * Function registers an implementation of a public key verify algorithm
+ * Function registers an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*
@@ -135,7 +135,7 @@ int crypto_register_akcipher(struct akcipher_alg *alg);
/**
* crypto_unregister_akcipher() -- Unregister public key algorithm
*
- * Function unregisters an implementation of a public key verify algorithm
+ * Function unregisters an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*/
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
index 982fe5e8471c..3e09e2485306 100644
--- a/include/crypto/internal/blake2b.h
+++ b/include/crypto/internal/blake2b.h
@@ -7,65 +7,36 @@
#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
#define _CRYPTO_INTERNAL_BLAKE2B_H
+#include <asm/byteorder.h>
#include <crypto/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/array_size.h>
+#include <linux/compiler.h>
+#include <linux/build_bug.h>
+#include <linux/errno.h>
+#include <linux/math.h>
#include <linux/string.h>
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
+#include <linux/types.h>
static inline void blake2b_set_lastblock(struct blake2b_state *state)
{
state->f[0] = -1;
+ state->f[1] = 0;
}
-typedef void (*blake2b_compress_t)(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void __blake2b_update(struct blake2b_state *state,
- const u8 *in, size_t inlen,
- blake2b_compress_t compress)
+static inline void blake2b_set_nonlast(struct blake2b_state *state)
{
- const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2B_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
- in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ state->f[0] = 0;
+ state->f[1] = 0;
}
-static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
- blake2b_compress_t compress)
-{
- int i;
-
- blake2b_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
- memcpy(out, state->h, state->outlen);
-}
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
/* Helper functions for shash implementations of BLAKE2b */
struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEY_SIZE];
+ u8 key[BLAKE2B_BLOCK_SIZE];
unsigned int keylen;
};
@@ -74,10 +45,13 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
{
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ if (keylen > BLAKE2B_KEY_SIZE)
return -EINVAL;
+ BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE);
+
memcpy(tctx->key, key, keylen);
+ memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen);
tctx->keylen = keylen;
return 0;
@@ -89,26 +63,38 @@ static inline int crypto_blake2b_init(struct shash_desc *desc)
struct blake2b_state *state = shash_desc_ctx(desc);
unsigned int outlen = crypto_shash_digestsize(desc->tfm);
- __blake2b_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
+ __blake2b_init(state, outlen, tctx->keylen);
+ return tctx->keylen ?
+ crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0;
}
-static inline int crypto_blake2b_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2b_compress_t compress)
+static inline int crypto_blake2b_update_bo(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
- __blake2b_update(state, in, inlen, compress);
- return 0;
+ blake2b_set_nonlast(state);
+ compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE);
+ return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE);
}
-static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out,
blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ int i;
- __blake2b_final(state, out, compress);
+ memcpy(buf, in, inlen);
+ memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen);
+ blake2b_set_lastblock(state);
+ compress(state, buf, 1, inlen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, crypto_shash_digestsize(desc->tfm));
+ memzero_explicit(buf, sizeof(buf));
return 0;
}
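
A sketch of how an implementation wires its compression routine into these helpers; blake2b_compress_arch is a placeholder name:

	static void blake2b_compress_arch(struct blake2b_state *state,
					  const u8 *block, size_t nblocks, u32 inc);

	static int blake2b_arch_update(struct shash_desc *desc, const u8 *in,
				       unsigned int inlen)
	{
		/* Returns the partial-block remainder for the API to buffer. */
		return crypto_blake2b_update_bo(desc, in, inlen,
						blake2b_compress_arch);
	}

	static int blake2b_arch_finup(struct shash_desc *desc, const u8 *in,
				      unsigned int inlen, u8 *out)
	{
		return crypto_blake2b_finup(desc, in, inlen, out,
					    blake2b_compress_arch);
	}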
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
new file mode 100644
index 000000000000..52d9d4c82493
--- /dev/null
+++ b/include/crypto/internal/blockhash.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handle partial blocks for block hash.
+ *
+ * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
+#define _CRYPTO_INTERNAL_BLOCKHASH_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
+ buf, buflen) \
+ ({ \
+ typeof(block_fn) *_block_fn = &(block_fn); \
+ typeof(state + 0) _state = (state); \
+ unsigned int _buflen = (buflen); \
+ size_t _nbytes = (nbytes); \
+ unsigned int _bs = (bs); \
+ const u8 *_src = (src); \
+ u8 *_buf = (buf); \
+ while ((_buflen + _nbytes) >= _bs) { \
+ const u8 *data = _src; \
+ size_t len = _nbytes; \
+ size_t blocks; \
+ int remain; \
+ if (_buflen) { \
+ remain = _bs - _buflen; \
+ memcpy(_buf + _buflen, _src, remain); \
+ data = _buf; \
+ len = _bs; \
+ } \
+ remain = len % _bs; \
+ blocks = (len - remain) / (dv); \
+ (*_block_fn)(_state, data, blocks); \
+ _src += len - remain - _buflen; \
+ _nbytes -= len - remain - _buflen; \
+ _buflen = 0; \
+ } \
+ memcpy(_buf + _buflen, _src, _nbytes); \
+ _buflen += _nbytes; \
+ })
+
+#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
+#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
+
+#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
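
A minimal sketch of the intended use, following the pattern where the partial-block length is derived from a running byte count so the macro's result can be discarded; the foo_* names and 64-byte block size are illustrative:

	struct foo_state {
		u32 h[8];
		u64 count;
		u8 buf[64];
	};

	static void foo_blocks(u32 *h, const u8 *data, size_t nblocks);

	static void foo_update(struct foo_state *st, const u8 *data, size_t len)
	{
		unsigned int partial = st->count % 64;

		st->count += len;
		/* Feeds whole 64-byte blocks to foo_blocks(), buffers the tail. */
		BLOCK_HASH_UPDATE_BLOCKS(foo_blocks, st->h, data, len, 64,
					 st->buf, partial);
	}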
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
deleted file mode 100644
index fd54074332f5..000000000000
--- a/include/crypto/internal/cryptouser.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-#else
-static inline int crypto_reportstat(struct sk_buff *in_skb,
- struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- return -ENOTSUPP;
-}
-#endif
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index 4f6c1a68882f..57cd75242141 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -27,13 +27,14 @@
#define _CRYPTO_ECC_H
#include <crypto/ecc_curve.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
#define ECC_CURVE_NIST_P384_DIGITS 6
-#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
+#define ECC_CURVE_NIST_P521_DIGITS 9
+#define ECC_MAX_DIGITS DIV_ROUND_UP(521, 64) /* NIST P521 */
#define ECC_DIGITS_TO_BYTES_SHIFT 3
@@ -41,6 +42,18 @@
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+/*
+ * The integers r and s making up the signature are expected to be
+ * formatted as two consecutive u64 arrays of size ECC_MAX_BYTES.
+ * The bytes within each u64 digit are in native endianness,
+ * but the order of the u64 digits themselves is little endian.
+ * This format allows direct use by internal vli_*() functions.
+ */
+struct ecdsa_raw_sig {
+ u64 r[ECC_MAX_DIGITS];
+ u64 s[ECC_MAX_DIGITS];
+};
+
/**
* ecc_swap_digits() - Copy ndigits from big endian array to native array
* @in: Input array
@@ -57,6 +70,19 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
}
/**
+ * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
+ * @in: Input byte array
+ * @nbytes Size of input byte array
+ * @out Output digits array
+ * @ndigits: Number of digits to create from byte array
+ *
+ * The first byte in the input byte array is expected to hold the most
+ * significant bits of the large integer.
+ */
+void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ u64 *out, unsigned int ndigits);
+
+/**
* ecc_is_key_valid() - Validate a given ECDH private key
*
* @curve_id: id representing the curve to use
@@ -81,7 +107,8 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
* Returns 0 if the private key was generated successfully, a negative value
* if an error occurred.
*/
-int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey);
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+ u64 *private_key);
/**
* ecc_make_pub_key() - Compute an ECC public key
@@ -278,4 +305,6 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
const u64 *y, const struct ecc_point *q,
const struct ecc_curve *curve);
+extern struct crypto_template ecdsa_x962_tmpl;
+extern struct crypto_template ecdsa_p1363_tmpl;
#endif
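
An illustrative use of the new conversion helper to unpack a raw big-endian (r,s) signature into the digit format described above (P-256 coordinate sizes assumed):

	static void example_unpack_p256_sig(const u8 raw[64],
					    struct ecdsa_raw_sig *sig)
	{
		/* Zero-extends each 32-byte coordinate to ECC_MAX_DIGITS digits. */
		ecc_digits_from_bytes(raw, 32, sig->r, ECC_MAX_DIGITS);
		ecc_digits_from_bytes(raw + 32, 32, sig->s, ECC_MAX_DIGITS);
	}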
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index fbf4be56cf12..b6a4ea2240fc 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -27,10 +27,10 @@ struct device;
* @retry_support: indication that the hardware allows re-execution
* of a failed backlog request
 * by requeueing it on the crypto-engine queue, in head position to keep order
+ * @rt: whether this queue is set to run as a realtime task
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
* so the subsystem requests the driver to prepare the hardware
* by issuing this call
@@ -51,14 +51,13 @@ struct crypto_engine {
bool running;
bool retry_support;
+ bool rt;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
- bool rt;
-
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
int (*do_batch_requests)(struct crypto_engine *engine);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 7fd7126f593a..012f5fb22d43 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,7 +15,6 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 58967593b6b4..0f85c543f80b 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -11,21 +11,25 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
-struct ahash_request;
-struct scatterlist;
+/* Set this bit to handle partial blocks in the API. */
+#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000
-struct crypto_hash_walk {
- char *data;
+/* Set this bit if final requires at least one byte. */
+#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000
- unsigned int offset;
- unsigned int flags;
+/* Set this bit if finup can deal with multiple blocks. */
+#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000
- struct page *pg;
- unsigned int entrylen;
+/* This bit is set by the Crypto API if export_core is not supported. */
+#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000
- unsigned int total;
- struct scatterlist *sg;
-};
+#define HASH_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = ahash_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
+struct ahash_request;
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@@ -57,21 +61,13 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
-int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk);
-
-static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
-{
- return !(walk->entrylen | walk->total);
-}
-
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -81,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
return crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+ return crypto_hash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -210,7 +214,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err)
static inline u32 ahash_request_flags(struct ahash_request *req)
{
- return req->base.flags;
+ return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE;
}
static inline struct crypto_ahash *crypto_spawn_ahash(
@@ -270,5 +274,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_shash, base);
}
+static inline bool ahash_request_isvirt(struct ahash_request *req)
+{
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+}
+
+static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
+{
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
+}
+
+static inline struct ahash_request *ahash_fbreq_on_stack_init(
+ char *buf, struct ahash_request *old)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_ahash_tfm(crypto_ahash_fb(tfm)));
+ ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ req->src = old->src;
+ req->result = old->result;
+ req->nbytes = old->nbytes;
+
+ return req;
+}
+
+/* Return the state size without partial block for block-only algorithms. */
+static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
+{
+ return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
+}
+
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+ memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 196aa769f296..c60315f47562 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,9 +6,8 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <asm/unaligned.h>
-#include <linux/types.h>
#include <crypto/poly1305.h>
+#include <linux/types.h>
/*
* Poly1305 core functions. These only accept whole blocks; the caller must
@@ -31,4 +30,29 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
+void poly1305_block_init_arch(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_block_init_generic(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit);
+
+static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len,
+ u32 padbit)
+{
+ poly1305_core_blocks(&state->h, &state->core_r, src,
+ len / POLY1305_BLOCK_SIZE, padbit);
+}
+
+void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]);
+
+static inline void poly1305_emit_generic(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ poly1305_core_emit(state, nonce, digest);
+}
+
#endif
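
A sketch of the generic block-state flow these hooks expose. Callers are responsible for buffering and padding the tail, so len here is assumed to be a whole, nonzero number of 16-byte blocks:

	static void example_poly1305_core(const u8 raw_key[POLY1305_BLOCK_SIZE],
					  const u8 *blocks, unsigned int len,
					  const u32 nonce[4],
					  u8 digest[POLY1305_DIGEST_SIZE])
	{
		struct poly1305_block_state state;

		poly1305_block_init_generic(&state, raw_key);
		/* padbit == 1 for full blocks; 0 only for a padded final block. */
		poly1305_blocks_generic(&state, blocks, len, 1);
		poly1305_emit_generic(&state.h, digest, nonce);
	}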
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index e870133f4b77..071a1951b992 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -8,6 +8,7 @@
#ifndef _RSA_HELPER_
#define _RSA_HELPER_
#include <linux/types.h>
+#include <crypto/akcipher.h>
/**
* rsa_key - RSA key structure
@@ -53,5 +54,33 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len);
+#define RSA_PUB (true)
+#define RSA_PRIV (false)
+
+static inline int rsa_set_key(struct crypto_akcipher *child,
+ unsigned int *key_size, bool is_pubkey,
+ const void *key, unsigned int keylen)
+{
+ int err;
+
+ *key_size = 0;
+
+ if (is_pubkey)
+ err = crypto_akcipher_set_pub_key(child, key, keylen);
+ else
+ err = crypto_akcipher_set_priv_key(child, key, keylen);
+ if (err)
+ return err;
+
+ /* Find out new modulus size from rsa implementation */
+ err = crypto_akcipher_maxsize(child);
+ if (err > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ *key_size = err;
+ return 0;
+}
+
extern struct crypto_template rsa_pkcs1pad_tmpl;
+extern struct crypto_template rsassa_pkcs1_tmpl;
#endif
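
A usage sketch from the perspective of a wrapping template's setkey path (the context layout is hypothetical):

	struct example_pad_ctx {
		struct crypto_akcipher *child;
		unsigned int key_size;
	};

	static int example_set_pub_key(struct crypto_akcipher *tfm,
				       const void *key, unsigned int keylen)
	{
		struct example_pad_ctx *ctx = akcipher_tfm_ctx(tfm);

		/* On success ctx->key_size holds the modulus size in bytes. */
		return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB,
				   key, keylen);
	}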
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 858fe3965ae3..533d6c16a491 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,12 +9,7 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-
-#define SCOMP_SCRATCH_SIZE 131072
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -27,13 +22,11 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @stat: Statistics for compress algorithm
* @base: Common crypto API algorithm data structure
+ * @streams: Per-CPU memory for the algorithm
+ * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(struct crypto_scomp *tfm);
- void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -42,6 +35,14 @@ struct scomp_alg {
void *ctx);
union {
+ struct {
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *ctx);
+ };
+ struct crypto_acomp_streams streams;
+ };
+
+ union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
@@ -72,17 +73,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
}
-static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
-{
- return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
-}
-
-static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
- void *ctx)
-{
- return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
-}
-
static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
new file mode 100644
index 000000000000..b9bccd3ff57f
--- /dev/null
+++ b/include/crypto/internal/sha2.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _CRYPTO_INTERNAL_SHA2_H
+#define _CRYPTO_INTERNAL_SHA2_H
+
+#include <crypto/internal/simd.h>
+#include <crypto/sha2.h>
+#include <linux/compiler_attributes.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256)
+bool sha256_is_arch_optimized(void);
+#else
+static inline bool sha256_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static inline void sha256_choose_blocks(
+ u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
+ bool force_generic, bool force_simd)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
+ sha256_blocks_generic(state, data, nblocks);
+ else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
+ (force_simd || crypto_simd_usable()))
+ sha256_blocks_simd(state, data, nblocks);
+ else
+ sha256_blocks_arch(state, data, nblocks);
+}
+
+static __always_inline void sha256_finup(
+ struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
+ size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
+ bool force_generic, bool force_simd)
+{
+ const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
+ __be64 *bits = (__be64 *)&buf[bit_offset];
+ int i;
+
+ buf[len++] = 0x80;
+ if (len > bit_offset) {
+ memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic,
+ force_simd);
+ len = 0;
+ }
+
+ memset(&buf[len], 0, bit_offset - len);
+ *bits = cpu_to_be64(sctx->count << 3);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
+
+ for (i = 0; i < digest_size; i += 4)
+ put_unaligned_be32(sctx->state[i / 4], out + i);
+}
+
+#endif /* _CRYPTO_INTERNAL_SHA2_H */
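
A one-shot digest sketch built on these helpers, with both force flags off so the dispatch in sha256_choose_blocks() picks the best implementation; buffer handling is deliberately simplified:

	static void example_sha256(const u8 *data, size_t len,
				   u8 out[SHA256_DIGEST_SIZE])
	{
		struct crypto_sha256_state sctx = {
			.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
				   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
		};
		u8 buf[SHA256_BLOCK_SIZE];
		size_t nblocks = len / SHA256_BLOCK_SIZE;
		size_t remain = len % SHA256_BLOCK_SIZE;

		/* count must hold the total message length before finup(). */
		sctx.count = len;
		if (nblocks)
			sha256_choose_blocks(sctx.state, data, nblocks,
					     false, false);
		memcpy(buf, data + len - remain, remain);
		sha256_finup(&sctx, buf, remain, out, SHA256_DIGEST_SIZE,
			     false, false);
	}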
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
index 97cb26ef8115..b16648c1a986 100644
--- a/include/crypto/internal/sig.h
+++ b/include/crypto/internal/sig.h
@@ -10,8 +10,88 @@
#include <crypto/algapi.h>
#include <crypto/sig.h>
+struct sig_instance {
+ void (*free)(struct sig_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct sig_alg, base)];
+ struct crypto_instance base;
+ };
+ struct sig_alg alg;
+ };
+};
+
+struct crypto_sig_spawn {
+ struct crypto_spawn base;
+};
+
static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
+
+/**
+ * crypto_register_sig() -- Register public key signature algorithm
+ *
+ * Function registers an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_sig(struct sig_alg *alg);
+
+/**
+ * crypto_unregister_sig() -- Unregister public key signature algorithm
+ *
+ * Function unregisters an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_sig(struct sig_alg *alg);
+
+int sig_register_instance(struct crypto_template *tmpl,
+ struct sig_instance *inst);
+
+static inline struct sig_instance *sig_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct sig_instance, alg.base);
+}
+
+static inline struct sig_instance *sig_alg_instance(struct crypto_sig *tfm)
+{
+ return sig_instance(crypto_tfm_alg_instance(&tfm->base));
+}
+
+static inline struct crypto_instance *sig_crypto_instance(struct sig_instance
+ *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline void *sig_instance_ctx(struct sig_instance *inst)
+{
+ return crypto_instance_ctx(sig_crypto_instance(inst));
+}
+
+int crypto_grab_sig(struct crypto_sig_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline struct crypto_sig *crypto_spawn_sig(struct crypto_sig_spawn
+ *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_sig(struct crypto_sig_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct sig_alg *crypto_spawn_sig_alg(struct crypto_sig_spawn
+ *spawn)
+{
+ return container_of(spawn->base.alg, struct sig_alg, base);
+}
#endif
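
A skeletal sketch of a template's ->create() using the new spawn helpers; attribute handling is abridged and all names are hypothetical:

	static int example_sig_create(struct crypto_template *tmpl,
				      struct rtattr **tb)
	{
		struct crypto_sig_spawn *spawn;
		struct sig_instance *inst;
		u32 mask;
		int err;

		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
		if (err)
			return err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;

		spawn = sig_instance_ctx(inst);
		err = crypto_grab_sig(spawn, sig_crypto_instance(inst),
				      crypto_attr_alg_name(tb[1]), 0, mask);
		if (err) {
			kfree(inst);
			return err;
		}

		/* ... fill in inst->alg and inst->free before registering ... */
		return sig_register_instance(tmpl, inst);
	}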
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index d2316242a988..7e7f1ac3b7fd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,7 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <asm/simd.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -14,11 +15,10 @@
struct simd_skcipher_alg;
struct skcipher_alg;
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
@@ -32,13 +32,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_aead_alg;
struct aead_alg;
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename);
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename);
-void simd_aead_free(struct simd_aead_alg *alg);
-
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs);
@@ -51,14 +44,9 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
*
* This delegates to may_use_simd(), except that this also returns false if SIMD
* in crypto code has been temporarily disabled on this CPU by the crypto
- * self-tests, in order to test the no-SIMD fallback code. This override is
- * currently limited to configurations where the extra self-tests are enabled,
- * because it might be a bit too invasive to be part of the regular self-tests.
- *
- * This is a macro so that <asm/simd.h>, which some architectures don't have,
- * doesn't have to be included directly here.
+ * self-tests, in order to test the no-SIMD fallback code.
*/
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 7ae42afdcf3e..d5aa535263f6 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,8 +10,8 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
-#include <linux/list.h>
#include <linux/types.h>
/*
@@ -55,40 +55,6 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
-struct skcipher_walk {
- union {
- struct {
- struct page *page;
- unsigned long offset;
- } phys;
-
- struct {
- u8 *page;
- void *addr;
- } virt;
- } src, dst;
-
- struct scatter_walk in;
- unsigned int nbytes;
-
- struct scatter_walk out;
- unsigned int total;
-
- struct list_head buffers;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -205,22 +171,15 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int err);
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req,
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req,
bool atomic);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req);
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err);
-
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic);
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1988e24a0d1d..2d9c4de57b69 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -51,20 +51,6 @@ struct crypto_kpp {
struct crypto_tfm base;
};
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @err_cnt: number of error for KPP requests
- */
-struct crypto_istat_kpp {
- atomic64_t setsecret_cnt;
- atomic64_t generate_public_key_cnt;
- atomic64_t compute_shared_secret_cnt;
- atomic64_t err_cnt;
-};
-
/**
* struct kpp_alg - generic key-agreement protocol primitives
*
@@ -87,7 +73,6 @@ struct crypto_istat_kpp {
* @exit: Undo everything @init did.
*
* @base: Common crypto API algorithm data structure
- * @stat: Statistics for KPP algorithm
*/
struct kpp_alg {
int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -100,10 +85,6 @@ struct kpp_alg {
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_kpp stat;
-#endif
-
struct crypto_alg base;
};
@@ -291,26 +272,6 @@ struct kpp_secret {
unsigned short len;
};
-static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&kpp_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -329,12 +290,7 @@ static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
-
- return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
+ return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
}
/**
@@ -353,12 +309,8 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
- return crypto_kpp_errstat(alg, alg->generate_public_key(req));
+ return crypto_kpp_alg(tfm)->generate_public_key(req);
}
/**
@@ -374,12 +326,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
- return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
+ return crypto_kpp_alg(tfm)->compute_shared_secret(req);
}
/**
diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h
new file mode 100644
index 000000000000..71dd38f59be1
--- /dev/null
+++ b/include/crypto/krb5.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos 5 crypto
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _CRYPTO_KRB5_H
+#define _CRYPTO_KRB5_H
+
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+
+struct crypto_shash;
+struct scatterlist;
+
+/*
+ * Crypto types from the wire, as defined by the Kerberos v5 protocol specs.
+ * These get mapped to Linux kernel crypto routines.
+ */
+#define KRB5_ENCTYPE_NULL 0x0000
+#define KRB5_ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
+#define KRB5_ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */
+#define KRB5_ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */
+#define KRB5_ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */
+/* XXX deprecated? */
+#define KRB5_ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */
+#define KRB5_ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
+#define KRB5_ENCTYPE_DES_HMAC_SHA1 0x0008
+#define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010
+#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
+#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
+#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
+#define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017
+#define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
+#define KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC 0x001a
+#define KRB5_ENCTYPE_UNKNOWN 0x01ff
+
+#define KRB5_CKSUMTYPE_CRC32 0x0001
+#define KRB5_CKSUMTYPE_RSA_MD4 0x0002
+#define KRB5_CKSUMTYPE_RSA_MD4_DES 0x0003
+#define KRB5_CKSUMTYPE_DESCBC 0x0004
+#define KRB5_CKSUMTYPE_RSA_MD5 0x0007
+#define KRB5_CKSUMTYPE_RSA_MD5_DES 0x0008
+#define KRB5_CKSUMTYPE_NIST_SHA 0x0009
+#define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c
+#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
+#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define KRB5_CKSUMTYPE_CMAC_CAMELLIA128 0x0011
+#define KRB5_CKSUMTYPE_CMAC_CAMELLIA256 0x0012
+#define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
+#define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
+#define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
+
+/*
+ * Constants used for key derivation
+ */
+/* from rfc3961 */
+#define KEY_USAGE_SEED_CHECKSUM (0x99)
+#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
+#define KEY_USAGE_SEED_INTEGRITY (0x55)
+
+/*
+ * Standard Kerberos error codes.
+ */
+#define KRB5_PROG_KEYTYPE_NOSUPP -1765328233
+
+/*
+ * Mode of operation.
+ */
+enum krb5_crypto_mode {
+ KRB5_CHECKSUM_MODE, /* Checksum only */
+ KRB5_ENCRYPT_MODE, /* Fully encrypted, possibly with integrity checksum */
+};
+
+struct krb5_buffer {
+ unsigned int len;
+ void *data;
+};
+
+/*
+ * Kerberos encoding type definition.
+ */
+struct krb5_enctype {
+ int etype; /* Encryption (key) type */
+ int ctype; /* Checksum type */
+ const char *name; /* "Friendly" name */
+ const char *encrypt_name; /* Crypto encrypt+checksum name */
+ const char *cksum_name; /* Crypto checksum name */
+ const char *hash_name; /* Crypto hash name */
+ const char *derivation_enc; /* Cipher used in key derivation */
+ u16 block_len; /* Length of encryption block */
+ u16 conf_len; /* Length of confounder (normally == block_len) */
+ u16 cksum_len; /* Length of checksum */
+ u16 key_bytes; /* Length of raw key, in bytes */
+ u16 key_len; /* Length of final key, in bytes */
+ u16 hash_len; /* Length of hash in bytes */
+ u16 prf_len; /* Length of PRF() result in bytes */
+ u16 Kc_len; /* Length of Kc in bytes */
+ u16 Ke_len; /* Length of Ke in bytes */
+ u16 Ki_len; /* Length of Ki in bytes */
+ bool keyed_cksum; /* T if a keyed cksum */
+
+ const struct krb5_crypto_profile *profile;
+
+ int (*random_to_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *in,
+ struct krb5_buffer *out); /* complete key generation */
+};
+
+/*
+ * krb5_api.c
+ */
+const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype);
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t data_size, size_t *_offset);
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_buffer_size, size_t *_offset);
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_offset, size_t *_len);
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp);
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp);
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len);
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+/*
+ * krb5_kdf.c
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *K,
+ unsigned int L,
+ const struct krb5_buffer *S,
+ struct krb5_buffer *result,
+ gfp_t gfp);
+
+#endif /* _CRYPTO_KRB5_H */
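
A compressed sketch of the encryption path. The enctype choice is a placeholder, the key-usage number is protocol-defined by the caller, and real callers size the buffer with crypto_krb5_how_much_buffer() first:

	static int example_krb5_seal(const struct krb5_buffer *TK, u32 usage,
				     struct scatterlist *sg, unsigned int nr_sg,
				     size_t sg_len, size_t data_offset,
				     size_t data_len)
	{
		const struct krb5_enctype *krb5;
		struct crypto_aead *aead;
		ssize_t ret;

		krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
		if (!krb5)
			return -ENOPKG;

		aead = crypto_krb5_prepare_encryption(krb5, TK, usage, GFP_KERNEL);
		if (IS_ERR(aead))
			return PTR_ERR(aead);

		/* false: let the library insert the confounder itself. */
		ret = crypto_krb5_encrypt(krb5, aead, sg, nr_sg, sg_len,
					  data_offset, data_len, false);
		crypto_free_aead(aead);
		return ret < 0 ? ret : 0;
	}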
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index cf9e9dec3d21..198b5d69b92f 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -8,6 +8,7 @@
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_STATE_SIZE 24
#define MD5_H0 0x67452301UL
#define MD5_H1 0xefcdab89UL
@@ -18,8 +19,8 @@ extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
struct md5_state {
u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
+ u32 block[MD5_BLOCK_WORDS];
};
#endif
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 0ef577cc00e3..1c66abf9de3b 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,4 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
-void crypto_put_default_null_skcipher(void);
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 090692ec3bc7..e54abda8cfe9 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -7,7 +7,6 @@
#define _CRYPTO_POLY1305_H
#include <linux/types.h>
-#include <linux/crypto.h>
#define POLY1305_BLOCK_SIZE 16
#define POLY1305_KEY_SIZE 32
@@ -38,17 +37,8 @@ struct poly1305_state {
};
};
-struct poly1305_desc_ctx {
- /* partial buffer */
- u8 buf[POLY1305_BLOCK_SIZE];
- /* bytes used in partial buffer */
- unsigned int buflen;
- /* how many keys have been set in r[] */
- unsigned short rset;
- /* whether s[] has been set */
- bool sset;
- /* finalize key */
- u32 s[4];
+/* Combined state for block function. */
+struct poly1305_block_state {
/* accumulator */
struct poly1305_state h;
/* key */
@@ -58,42 +48,29 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-
-static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_init_arch(desc, key);
- else
- poly1305_init_generic(desc, key);
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-
-static inline void poly1305_update(struct poly1305_desc_ctx *desc,
- const u8 *src, unsigned int nbytes)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_update_arch(desc, src, nbytes);
- else
- poly1305_update_generic(desc, src, nbytes);
-}
+struct poly1305_desc_ctx {
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* finalize key */
+ u32 s[4];
+ struct poly1305_block_state state;
+};
-void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
-void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+void poly1305_init(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes);
+void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest);
-static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)
+bool poly1305_is_arch_optimized(void);
+#else
+static inline bool poly1305_is_arch_optimized(void)
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_final_arch(desc, digest);
- else
- poly1305_final_generic(desc, digest);
+ return false;
}
+#endif
#endif
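
The consolidated entry points keep the familiar one-shot shape; a minimal sketch, noting that dispatch to an arch backend now happens inside these calls:

	static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
					 const u8 *data, unsigned int len,
					 u8 digest[POLY1305_DIGEST_SIZE])
	{
		struct poly1305_desc_ctx desc;

		poly1305_init(&desc, key);
		poly1305_update(&desc, data, len);
		poly1305_final(&desc, digest);
	}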
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
index 1d630f371f77..d2e63743e592 100644
--- a/include/crypto/polyval.h
+++ b/include/crypto/polyval.h
@@ -8,15 +8,7 @@
#ifndef _CRYPTO_POLYVAL_H
#define _CRYPTO_POLYVAL_H
-#include <linux/types.h>
-#include <linux/crypto.h>
-
#define POLYVAL_BLOCK_SIZE 16
#define POLYVAL_DIGEST_SIZE 16
-void polyval_mul_non4k(u8 *op1, const u8 *op2);
-
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator);
-
#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index b7f308977c84..81098e00c08f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -104,9 +104,6 @@ static inline int restrict_link_by_digsig(struct key *dest_keyring,
extern int query_asymmetric_key(const struct kernel_pkey_params *,
struct kernel_pkey_query *);
-extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int create_signature(struct kernel_pkey_params *, const void *, void *);
extern int verify_signature(const struct key *,
const struct public_key_signature *);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 6abe5102e5fb..f8224cc390f8 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -15,20 +15,6 @@
struct crypto_rng;
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt: number of RNG generate requests
- * @generate_tlen: total data size of generated data by the RNG
- * @seed_cnt: number of times the RNG was seeded
- * @err_cnt: number of error for RNG requests
- */
-struct crypto_istat_rng {
- atomic64_t generate_cnt;
- atomic64_t generate_tlen;
- atomic64_t seed_cnt;
- atomic64_t err_cnt;
-};
-
/**
* struct rng_alg - random number generator definition
*
@@ -46,7 +32,6 @@ struct crypto_istat_rng {
* size of the seed is defined with @seedsize .
* @set_ent: Set entropy that would otherwise be obtained from
* entropy source. Internal use only.
- * @stat: Statistics for rng algorithm
* @seedsize: The seed size required for a random number generator
* initialization defined with this variable. Some
 * random number generators do not require a seed
@@ -63,10 +48,6 @@ struct rng_alg {
void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
unsigned int len);
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_rng stat;
-#endif
-
unsigned int seedsize;
struct crypto_alg base;
@@ -121,12 +102,10 @@ static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
}
/**
- * crypto_rng_alg - obtain name of RNG
- * @tfm: cipher handle
- *
- * Return the generic name (cra_name) of the initialized random number generator
+ * crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle
+ * @tfm: RNG handle
*
- * Return: generic name string
+ * Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
@@ -144,26 +123,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
-static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&rng_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -182,17 +141,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- struct rng_alg *alg = crypto_rng_alg(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_rng *istat = rng_get_stat(alg);
-
- atomic64_inc(&istat->generate_cnt);
- atomic64_add(dlen, &istat->generate_tlen);
- }
-
- return crypto_rng_errstat(alg,
- alg->generate(tfm, src, slen, dst, dlen));
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
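With the stats plumbing gone, crypto_rng_generate() is a thin inline dispatch
to the algorithm's ->generate(). A hedged sketch of a caller, assuming a
"stdrng" instance reseeded via crypto_rng_reset() (a NULL seed lets the core
source seed material internally); demo_rng() is a hypothetical name:

	static int demo_rng(u8 buf[16])
	{
		struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
		int err;

		if (IS_ERR(rng))
			return PTR_ERR(rng);
		err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
		if (!err)
			err = crypto_rng_generate(rng, NULL, 0, buf, 16);
		crypto_free_rng(rng);
		return err;
	}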
/**
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 32fc4473175b..15ab743f68c8 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -11,11 +11,64 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
-#include <crypto/algapi.h>
-
+#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
+struct skcipher_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int nbytes;
+ unsigned int total;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg, int num)
@@ -26,79 +79,229 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
sg_mark_end(head);
}
-static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
+static inline void scatterwalk_start(struct scatter_walk *walk,
+ struct scatterlist *sg)
+{
+ walk->sg = sg;
+ walk->offset = sg->offset;
+}
+
+/*
+ * This is equivalent to scatterwalk_start(walk, sg) followed by
+ * scatterwalk_skip(walk, pos).
+ */
+static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
+ struct scatterlist *sg,
+ unsigned int pos)
{
- unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
- unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
- return len_this_page > len ? len : len_this_page;
+ while (pos > sg->length) {
+ pos -= sg->length;
+ sg = sg_next(sg);
+ }
+ walk->sg = sg;
+ walk->offset = sg->offset + pos;
}
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
unsigned int nbytes)
{
- unsigned int len_this_page = scatterwalk_pagelen(walk);
- return nbytes > len_this_page ? len_this_page : nbytes;
+ unsigned int len_this_sg;
+ unsigned int limit;
+
+ if (walk->offset >= walk->sg->offset + walk->sg->length)
+ scatterwalk_start(walk, sg_next(walk->sg));
+ len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;
+
+ /*
+ * HIGHMEM case: the page may have to be mapped into memory. To avoid
+ * the complexity of having to map multiple pages at once per sg entry,
+ * clamp the returned length to not cross a page boundary.
+ *
+ * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
+ * already mapped contiguously in the kernel's direct map. For improved
+ * performance, allow the walker to return data segments that cross a
+ * page boundary. Do still cap the length to PAGE_SIZE, since some
+ * users rely on that to avoid disabling preemption for too long when
+ * using SIMD. It's also needed for when skcipher_walk uses a bounce
+ * page due to the data not being aligned to the algorithm's alignmask.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ limit = PAGE_SIZE - offset_in_page(walk->offset);
+ else
+ limit = PAGE_SIZE;
+
+ return min3(nbytes, len_this_sg, limit);
+}
+
+/*
+ * Create a scatterlist that represents the remaining data in a walk. Uses
+ * chaining to reference the original scatterlist, so this uses at most two
+ * entries in @sg_out regardless of the number of entries in the original list.
+ * Assumes that sg_init_table() was already done.
+ */
+static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
+ struct scatterlist sg_out[2])
+{
+ if (walk->offset >= walk->sg->offset + walk->sg->length)
+ scatterwalk_start(walk, sg_next(walk->sg));
+ sg_set_page(sg_out, sg_page(walk->sg),
+ walk->sg->offset + walk->sg->length - walk->offset,
+ walk->offset);
+ scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}
-static inline void scatterwalk_advance(struct scatter_walk *walk,
- unsigned int nbytes)
+static inline void scatterwalk_map(struct scatter_walk *walk)
{
- walk->offset += nbytes;
+ struct page *base_page = sg_page(walk->sg);
+ unsigned int offset = walk->offset;
+ void *addr;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ struct page *page;
+
+ page = nth_page(base_page, offset >> PAGE_SHIFT);
+ offset = offset_in_page(offset);
+ addr = kmap_local_page(page) + offset;
+ } else {
+ /*
+ * When !HIGHMEM we allow the walker to return segments that
+ * span a page boundary; see scatterwalk_clamp(). To make it
+ * clear that in this case we're working in the linear buffer of
+ * the whole sg entry in the kernel's direct map rather than
+ * within the mapped buffer of a single page, compute the
+ * address as an offset from the page_address() of the first
+ * page of the sg entry. Either way the result is the address
+ * in the direct map, but this makes it clearer what is really
+ * going on.
+ */
+ addr = page_address(base_page) + offset;
+ }
+
+ walk->__addr = addr;
}
-static inline struct page *scatterwalk_page(struct scatter_walk *walk)
+/**
+ * scatterwalk_next() - Get the next data buffer in a scatterlist walk
+ * @walk: the scatter_walk
+ * @total: the total number of bytes remaining, > 0
+ *
+ * A virtual address for the next segment of data from the scatterlist will
+ * be placed into @walk->addr. The caller must call scatterwalk_done_src()
+ * or scatterwalk_done_dst() when it is done using this virtual address.
+ *
+ * Returns: the next number of bytes available, <= @total
+ */
+static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
+ unsigned int total)
{
- return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ unsigned int nbytes = scatterwalk_clamp(walk, total);
+
+ scatterwalk_map(walk);
+ return nbytes;
}
-static inline void scatterwalk_unmap(void *vaddr)
+static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
- kunmap_local(vaddr);
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ kunmap_local(walk->__addr);
}
-static inline void scatterwalk_start(struct scatter_walk *walk,
- struct scatterlist *sg)
+static inline void scatterwalk_advance(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- walk->sg = sg;
- walk->offset = sg->offset;
+ walk->offset += nbytes;
}
-static inline void *scatterwalk_map(struct scatter_walk *walk)
+/**
+ * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
+ * @walk: the scatter_walk
+ * @nbytes: the number of bytes processed this step, less than or equal to the
+ * number of bytes that scatterwalk_next() returned.
+ *
+ * Use this if the mapped address was not written to, i.e. it is source data.
+ */
+static inline void scatterwalk_done_src(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- return kmap_local_page(scatterwalk_page(walk)) +
- offset_in_page(walk->offset);
+ scatterwalk_unmap(walk);
+ scatterwalk_advance(walk, nbytes);
}
-static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
- unsigned int more)
+/**
+ * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
+ * @walk: the scatter_walk
+ * @nbytes: the number of bytes processed this step, less than or equal to the
+ * number of bytes that scatterwalk_next() returned.
+ *
+ * Use this if the mapped address may have been written to, i.e. it is
+ * destination data.
+ */
+static inline void scatterwalk_done_dst(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- if (out) {
- struct page *page;
+ scatterwalk_unmap(walk);
+ /*
+ * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
+ * relying on flush_dcache_page() being a no-op when not implemented,
+ * since otherwise the BUG_ON in sg_page() does not get optimized out.
+ * This also avoids having to consider whether the loop would get
+ * reliably optimized out or not.
+ */
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
+ struct page *base_page;
+ unsigned int offset;
+ int start, end, i;
- page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- flush_dcache_page(page);
+ base_page = sg_page(walk->sg);
+ offset = walk->offset;
+ start = offset >> PAGE_SHIFT;
+ end = start + (nbytes >> PAGE_SHIFT);
+ end += (offset_in_page(offset) + offset_in_page(nbytes) +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = start; i < end; i++)
+ flush_dcache_page(nth_page(base_page, i));
}
-
- if (more && walk->offset >= walk->sg->offset + walk->sg->length)
- scatterwalk_start(walk, sg_next(walk->sg));
+ scatterwalk_advance(walk, nbytes);
}
-static inline void scatterwalk_done(struct scatter_walk *walk, int out,
- int more)
-{
- if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
- !(walk->offset & (PAGE_SIZE - 1)))
- scatterwalk_pagedone(walk, out, more);
-}
+void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);
+
+void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
+ unsigned int nbytes);
+
+void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
+ unsigned int nbytes);
-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out);
+void memcpy_from_sglist(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes);
-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out);
+void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ const void *buf, unsigned int nbytes);
+
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+
+/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
+static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ unsigned int start,
+ unsigned int nbytes, int out)
+{
+ if (out)
+ memcpy_to_sglist(sg, start, buf, nbytes);
+ else
+ memcpy_from_sglist(buf, sg, start, nbytes);
+}
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
+
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
+
#endif /* _CRYPTO_SCATTERWALK_H */
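A hedged sketch of the intended walk pattern for source data, built only from
the helpers above (illustrative, not part of the patch): start the walk, take
one clamped-and-mapped segment at a time via scatterwalk_next(), and release
each segment with scatterwalk_done_src().

	static u8 sg_xor_bytes(struct scatterlist *sg, unsigned int total)
	{
		struct scatter_walk walk;
		u8 acc = 0;

		scatterwalk_start(&walk, sg);
		while (total) {
			unsigned int n = scatterwalk_next(&walk, total);
			const u8 *p = walk.addr;
			unsigned int i;

			/* p stays mapped until scatterwalk_done_src(). */
			for (i = 0; i < n; i++)
				acc ^= p[i];
			scatterwalk_done_src(&walk, n);
			total -= n;
		}
		return acc;
	}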
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 044ecea60ac8..f48230b1413c 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -10,6 +10,7 @@
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
+#define SHA1_STATE_SIZE offsetof(struct sha1_state, buffer)
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
@@ -25,14 +26,6 @@ struct sha1_state {
u8 buffer[SHA1_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
/*
* An implementation of SHA-1's compression function. Don't use in new code!
* You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 2e0e7c3827d1..62701d136c79 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -10,11 +10,10 @@
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
+#include <linux/math.h>
#include <linux/string.h>
-
-#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
@@ -32,63 +31,38 @@ static inline int sha1_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha1_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha1_block_fn *block_fn)
+static inline int sha1_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SHA1_BLOCK_SIZE);
struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buffer, 1);
- }
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA1_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SHA1_BLOCK_SIZE);
+ return remain;
}
-static inline int sha1_base_do_finalize(struct shash_desc *desc,
- sha1_block_fn *block_fn)
+static inline int sha1_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sha1_block_fn *block_fn)
{
- const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SHA1_BLOCK_SIZE / 8 - 1;
struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ union {
+ __be64 b64[SHA1_BLOCK_SIZE / 4];
+ u8 u8[SHA1_BLOCK_SIZE * 2];
+ } block = {};
+
+ if (len >= bit_offset * 8)
+ bit_offset += SHA1_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SHA1_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -102,7 +76,6 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
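A hedged sketch of how arch glue might sit on the reworked helpers above;
my_sha1_blocks() is a hypothetical compression routine with the sha1_block_fn
signature, and the update path now returns the unprocessed remainder instead
of buffering it:

	static int my_sha1_update(struct shash_desc *desc, const u8 *data,
				  unsigned int len)
	{
		/* Returns the count of leftover bytes, < SHA1_BLOCK_SIZE. */
		return sha1_base_do_update_blocks(desc, data, len,
						  my_sha1_blocks);
	}

	static int my_sha1_finup(struct shash_desc *desc, const u8 *data,
				 unsigned int len, u8 *out)
	{
		sha1_base_do_finup(desc, data, len, my_sha1_blocks);
		return sha1_base_finish(desc, out);
	}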
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index b9e9281d76c9..4912572578dc 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -13,12 +13,14 @@
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
+#define SHA256_STATE_WORDS 8
#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
+#define SHA512_STATE_SIZE 80
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
@@ -64,9 +66,19 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
+struct crypto_sha256_state {
+ u32 state[SHA256_STATE_WORDS];
u64 count;
+};
+
+struct sha256_state {
+ union {
+ struct crypto_sha256_state ctx;
+ struct {
+ u32 state[SHA256_STATE_WORDS];
+ u64 count;
+ };
+ };
u8 buf[SHA256_BLOCK_SIZE];
};
@@ -76,31 +88,7 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-/*
- * Stand-alone implementation of the SHA256 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
- *
- * For details see lib/crypto/sha256.c
- */
-
-static inline void sha256_init(struct sha256_state *sctx)
+static inline void sha256_block_init(struct crypto_sha256_state *sctx)
{
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
@@ -112,11 +100,16 @@ static inline void sha256_init(struct sha256_state *sctx)
sctx->state[7] = SHA256_H7;
sctx->count = 0;
}
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
-void sha256_final(struct sha256_state *sctx, u8 *out);
-void sha256(const u8 *data, unsigned int len, u8 *out);
-static inline void sha224_init(struct sha256_state *sctx)
+static inline void sha256_init(struct sha256_state *sctx)
+{
+ sha256_block_init(&sctx->ctx);
+}
+void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len);
+void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE]);
+void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]);
+
+static inline void sha224_block_init(struct crypto_sha256_state *sctx)
{
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
@@ -128,7 +121,12 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
+
+static inline void sha224_init(struct sha256_state *sctx)
+{
+ sha224_block_init(&sctx->ctx);
+}
/* Simply use sha256_update as it is equivalent to sha224_update. */
-void sha224_final(struct sha256_state *sctx, u8 *out);
+void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE]);
#endif /* _CRYPTO_SHA2_H */
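A minimal sketch contrasting the one-shot and incremental SHA-256 library
calls declared above (both should yield the same digest); demo_sha256() is a
hypothetical caller:

	static void demo_sha256(const u8 *data, size_t len)
	{
		u8 d1[SHA256_DIGEST_SIZE], d2[SHA256_DIGEST_SIZE];
		struct sha256_state sctx;

		sha256(data, len, d1);			/* one-shot */

		sha256_init(&sctx);			/* incremental */
		sha256_update(&sctx, data, len);
		sha256_final(&sctx, d2);		/* d1 == d2 */
	}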
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
deleted file mode 100644
index ab904d82236f..000000000000
--- a/include/crypto/sha256_base.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha256_base.h - core logic for SHA-256 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA256_BASE_H
-#define _CRYPTO_SHA256_BASE_H
-
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha224_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha224_init(sctx);
- return 0;
-}
-
-static inline int sha256_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha256_init(sctx);
- return 0;
-}
-
-static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA256_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA256_BLOCK_SIZE;
- len %= SHA256_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA256_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_update(sctx, data, len, block_fn);
-}
-
-static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
- sha256_block_fn *block_fn)
-{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_finalize(sctx, block_fn);
-}
-
-static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
- unsigned int digest_size)
-{
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_finish(sctx, out, digest_size);
-}
-
-#endif /* _CRYPTO_SHA256_BASE_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index 080f60c2e6b1..41e1b83a6d91 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -5,30 +5,32 @@
#ifndef __CRYPTO_SHA3_H__
#define __CRYPTO_SHA3_H__
+#include <linux/types.h>
+
#define SHA3_224_DIGEST_SIZE (224 / 8)
#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
+#define SHA3_224_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1
#define SHA3_256_DIGEST_SIZE (256 / 8)
#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
+#define SHA3_256_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1
#define SHA3_384_DIGEST_SIZE (384 / 8)
#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
+#define SHA3_384_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1
#define SHA3_512_DIGEST_SIZE (512 / 8)
#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
+#define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1
-struct sha3_state {
- u64 st[25];
- unsigned int rsiz;
- unsigned int rsizw;
+#define SHA3_STATE_SIZE 200
- unsigned int partial;
- u8 buf[SHA3_224_BLOCK_SIZE];
+struct shash_desc;
+
+struct sha3_state {
+ u64 st[SHA3_STATE_SIZE / 8];
};
int crypto_sha3_init(struct shash_desc *desc);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out);
#endif
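With the update/final exports removed, SHA-3 is reached through the generic
shash API. A hedged sketch assuming the standard crypto_alloc_shash() and
crypto_shash_tfm_digest() helpers from <crypto/hash.h>; demo_sha3_256() is a
hypothetical name:

	static int demo_sha3_256(const u8 *data, unsigned int len,
				 u8 digest[SHA3_256_DIGEST_SIZE])
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("sha3-256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		err = crypto_shash_tfm_digest(tfm, data, len, digest);
		crypto_free_shash(tfm);
		return err;
	}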
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index b370b3340b16..aa814bab442d 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -10,11 +10,11 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/math.h>
#include <linux/string.h>
-
-#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
int blocks);
@@ -53,66 +53,51 @@ static inline int sha512_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha512_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha512_block_fn *block_fn)
+static inline int sha512_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SHA512_BLOCK_SIZE);
struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+ len -= remain;
sctx->count[0] += len;
if (sctx->count[0] < len)
sctx->count[1]++;
-
- if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA512_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA512_BLOCK_SIZE;
- len %= SHA512_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA512_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
+ block_fn(sctx, data, len / SHA512_BLOCK_SIZE);
+ return remain;
}
-static inline int sha512_base_do_finalize(struct shash_desc *desc,
- sha512_block_fn *block_fn)
+static inline int sha512_base_do_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len,
+ sha512_block_fn *block_fn)
{
- const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ unsigned int bit_offset = SHA512_BLOCK_SIZE / 8 - 2;
struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+ union {
+ __be64 b64[SHA512_BLOCK_SIZE / 4];
+ u8 u8[SHA512_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SHA512_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buf, 1);
+ remain = sha512_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- block_fn(sctx, sctx->buf, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SHA512_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count[0] += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count[1] << 3 |
+ sctx->count[0] >> 61);
+ block.b64[bit_offset + 1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, block.u8, (bit_offset + 2) * 8 / SHA512_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -126,9 +111,10 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
put_unaligned_be64(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
+void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
#endif /* _CRYPTO_SHA512_BASE_H */
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
index d25186bb2be3..fa6dafafab3f 100644
--- a/include/crypto/sig.h
+++ b/include/crypto/sig.h
@@ -20,6 +20,57 @@ struct crypto_sig {
};
/**
+ * struct sig_alg - generic public key signature algorithm
+ *
+ * @sign:	Function performs a sign operation as defined by the public
+ *		key algorithm. On success, the signature size is returned.
+ *		Optional.
+ * @verify:	Function performs a complete verify operation as defined by
+ *		the public key algorithm, returning verification status. Optional.
+ * @set_pub_key: Function invokes the algorithm specific set public key
+ * function, which knows how to decode and interpret
+ * the BER encoded public key and parameters. Mandatory.
+ * @set_priv_key: Function invokes the algorithm specific set private key
+ * function, which knows how to decode and interpret
+ * the BER encoded private key and parameters. Optional.
+ * @key_size: Function returns key size. Mandatory.
+ * @digest_size: Function returns maximum digest size. Optional.
+ * @max_size: Function returns maximum signature size. Optional.
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct sig_alg {
+ int (*sign)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+ int (*verify)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+ int (*set_pub_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ int (*set_priv_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ unsigned int (*key_size)(struct crypto_sig *tfm);
+ unsigned int (*digest_size)(struct crypto_sig *tfm);
+ unsigned int (*max_size)(struct crypto_sig *tfm);
+ int (*init)(struct crypto_sig *tfm);
+ void (*exit)(struct crypto_sig *tfm);
+
+ struct crypto_alg base;
+};
+
+/**
* DOC: Generic Public Key Signature API
*
* The Public Key Signature API is used with the algorithms of type
@@ -47,6 +98,21 @@ static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
return &tfm->base;
}
+static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_sig, base);
+}
+
+static inline struct sig_alg *__crypto_sig_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct sig_alg, base);
+}
+
+static inline struct sig_alg *crypto_sig_alg(struct crypto_sig *tfm)
+{
+ return __crypto_sig_alg(crypto_sig_tfm(tfm)->__crt_alg);
+}
+
/**
* crypto_free_sig() - free signature tfm handle
*
@@ -60,16 +126,55 @@ static inline void crypto_free_sig(struct crypto_sig *tfm)
}
/**
- * crypto_sig_maxsize() - Get len for output buffer
+ * crypto_sig_keysize() - Get key size
*
- * Function returns the dest buffer size required for a given key.
+ * Function returns the key size in bits.
* Function assumes that the key is already set in the transformation. If this
- * function is called without a setkey or with a failed setkey, you will end up
+ * function is called without a setkey or with a failed setkey, you may end up
* in a NULL dereference.
*
* @tfm: signature tfm handle allocated with crypto_alloc_sig()
*/
-int crypto_sig_maxsize(struct crypto_sig *tfm);
+static inline unsigned int crypto_sig_keysize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->key_size(tfm);
+}
+
+/**
+ * crypto_sig_digestsize() - Get maximum digest size
+ *
+ * Function returns the maximum digest size in bytes.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you may end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_digestsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->digest_size(tfm);
+}
+
+/**
+ * crypto_sig_maxsize() - Get maximum signature size
+ *
+ * Function returns the maximum signature size in bytes.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you may end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->max_size(tfm);
+}
/**
* crypto_sig_sign() - Invoke signing operation
@@ -82,11 +187,16 @@ int crypto_sig_maxsize(struct crypto_sig *tfm);
 * @dst:	destination buffer
* @dlen: destination length
*
- * Return: zero on success; error code in case of error
+ * Return: signature size on success; error code in case of error
*/
-int crypto_sig_sign(struct crypto_sig *tfm,
- const void *src, unsigned int slen,
- void *dst, unsigned int dlen);
+static inline int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->sign(tfm, src, slen, dst, dlen);
+}
/**
* crypto_sig_verify() - Invoke signature verification
@@ -102,9 +212,14 @@ int crypto_sig_sign(struct crypto_sig *tfm,
*
* Return: zero on verification success; error code in case of error.
*/
-int crypto_sig_verify(struct crypto_sig *tfm,
- const void *src, unsigned int slen,
- const void *digest, unsigned int dlen);
+static inline int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->verify(tfm, src, slen, digest, dlen);
+}
/**
* crypto_sig_set_pubkey() - Invoke set public key operation
@@ -119,8 +234,13 @@ int crypto_sig_verify(struct crypto_sig *tfm,
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_set_pubkey(struct crypto_sig *tfm,
- const void *key, unsigned int keylen);
+static inline int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_pub_key(tfm, key, keylen);
+}
/**
* crypto_sig_set_privkey() - Invoke set private key operation
@@ -135,6 +255,11 @@ int crypto_sig_set_pubkey(struct crypto_sig *tfm,
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_set_privkey(struct crypto_sig *tfm,
- const void *key, unsigned int keylen);
+static inline int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_priv_key(tfm, key, keylen);
+}
#endif
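A hedged sketch of a full verification chain through the new inline wrappers
above; "ecdsa-nist-p256" is one plausible algorithm name and demo_sig_verify()
is a hypothetical caller:

	static int demo_sig_verify(const void *key, unsigned int keylen,
				   const void *sig, unsigned int siglen,
				   const void *digest, unsigned int dlen)
	{
		struct crypto_sig *tfm;
		int err;

		tfm = crypto_alloc_sig("ecdsa-nist-p256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_sig_set_pubkey(tfm, key, keylen);
		if (!err)
			err = crypto_sig_verify(tfm, sig, siglen,
						digest, dlen);

		crypto_free_sig(tfm);
		return err;
	}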
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index c8857d7bdb37..9e5853464345 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,28 +65,6 @@ struct crypto_lskcipher {
};
/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for cipher requests
- */
-struct crypto_istat_cipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat;
-#else
-#define SKCIPHER_ALG_COMMON_STAT
-#endif
-
-/*
* struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
@@ -103,7 +81,6 @@ struct crypto_istat_cipher {
* @chunksize: Equal to the block size except for stream ciphers such as
* CTR where it is set to the underlying block size.
* @statesize: Size of the internal state for the algorithm.
- * @stat: Statistics for cipher algorithm
* @base: Definition of a generic crypto algorithm.
*/
#define SKCIPHER_ALG_COMMON { \
@@ -113,8 +90,6 @@ struct crypto_istat_cipher {
unsigned int chunksize; \
unsigned int statesize; \
\
- SKCIPHER_ALG_COMMON_STAT \
- \
struct crypto_alg base; \
}
struct skcipher_alg_common SKCIPHER_ALG_COMMON;
@@ -239,16 +214,17 @@ struct lskcipher_alg {
#define MAX_SYNC_SKCIPHER_REQSIZE 384
/*
- * This performs a type-check against the "tfm" argument to make sure
+ * This performs a type-check against the "_tfm" argument to make sure
* all users have the correct skcipher tfm for doing on-stack requests.
*/
-#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- MAX_SYNC_SKCIPHER_REQSIZE + \
- (!(sizeof((struct crypto_sync_skcipher *)1 == \
- (typeof(tfm))1))) \
+ MAX_SYNC_SKCIPHER_REQSIZE \
] CRYPTO_MINALIGN_ATTR; \
- struct skcipher_request *name = (void *)__##name##_desc
+ struct skcipher_request *name = \
+ (((struct skcipher_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_skcipher_tfm((_tfm)), \
+ (void *)__##name##_desc)
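A hedged usage sketch for the reworked on-stack macro, assuming an already
keyed crypto_sync_skcipher handle (the macro now wires up req->base.tfm
itself); demo_encrypt() is a hypothetical name:

	static int demo_encrypt(struct crypto_sync_skcipher *tfm,
				struct scatterlist *sg, unsigned int len,
				u8 *iv)
	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
		int err;

		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
		return err;
	}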
/**
* DOC: Symmetric Key Cipher API
@@ -336,6 +312,12 @@ static inline struct crypto_tfm *crypto_lskcipher_tfm(
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_skcipher_tfm(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_tfm(&tfm->base);
+}
+
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
@@ -861,19 +843,20 @@ static inline struct skcipher_request *skcipher_request_cast(
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
-static inline struct skcipher_request *skcipher_request_alloc(
+static inline struct skcipher_request *skcipher_request_alloc_noprof(
struct crypto_skcipher *tfm, gfp_t gfp)
{
struct skcipher_request *req;
- req = kmalloc(sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(tfm), gfp);
+ req = kmalloc_noprof(sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tfm), gfp);
if (likely(req))
skcipher_request_set_tfm(req, tfm);
return req;
}
+#define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__))
/**
* skcipher_request_free() - zeroize and free request data structure
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
deleted file mode 100644
index 04a92c1013c8..000000000000
--- a/include/crypto/sm2.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * sm2.h - SM2 asymmetric public-key algorithm
- * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
- * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
- *
- * Copyright (c) 2020, Alibaba Group.
- * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#ifndef _CRYPTO_SM2_H
-#define _CRYPTO_SM2_H
-
-struct shash_desc;
-
-#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
-int sm2_compute_z_digest(struct shash_desc *desc,
- const void *key, unsigned int keylen, void *dgst);
-#else
-static inline int sm2_compute_z_digest(struct shash_desc *desc,
- const void *key, unsigned int keylen,
- void *dgst)
-{
- return -ENOTSUPP;
-}
-#endif
-
-#endif /* _CRYPTO_SM2_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1f021ad0533f..c8d02c86c298 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -14,6 +14,7 @@
#define SM3_DIGEST_SIZE 32
#define SM3_BLOCK_SIZE 64
+#define SM3_STATE_SIZE 40
#define SM3_T1 0x79CC4519
#define SM3_T2 0x7A879D8A
@@ -58,7 +59,6 @@ static inline void sm3_init(struct sm3_state *sctx)
sctx->count = 0;
}
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
-void sm3_final(struct sm3_state *sctx, u8 *out);
+void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks);
#endif
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 2f3a32ab97bb..7c53570bc05e 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -11,87 +11,59 @@
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
-#include <linux/crypto.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
static inline int sm3_base_init(struct shash_desc *desc)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SM3_IVA;
- sctx->state[1] = SM3_IVB;
- sctx->state[2] = SM3_IVC;
- sctx->state[3] = SM3_IVD;
- sctx->state[4] = SM3_IVE;
- sctx->state[5] = SM3_IVF;
- sctx->state[6] = SM3_IVG;
- sctx->state[7] = SM3_IVH;
- sctx->count = 0;
-
+ sm3_init(shash_desc_ctx(desc));
return 0;
}
-static inline int sm3_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ sm3_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE);
struct sm3_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SM3_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SM3_BLOCK_SIZE);
+ return remain;
}
-static inline int sm3_base_do_finalize(struct shash_desc *desc,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sm3_block_fn *block_fn)
{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1;
struct sm3_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ union {
+ __be64 b64[SM3_BLOCK_SIZE / 4];
+ u8 u8[SM3_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SM3_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buffer, 1);
+ remain = sm3_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SM3_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -104,8 +76,6 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index cae1b4a01971..570f720a113b 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,15 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- union {
- u8 buffer[STREEBOG_BLOCK_SIZE];
- struct streebog_uint512 m;
- };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
struct streebog_uint512 Sigma;
- size_t fillsize;
};
#endif /* !_CRYPTO_STREEBOG_H_ */
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
index acbb917a00c6..2594f45777b5 100644
--- a/include/crypto/utils.h
+++ b/include/crypto/utils.h
@@ -7,7 +7,7 @@
#ifndef _CRYPTO_UTILS_H
#define _CRYPTO_UTILS_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/compiler_attributes.h>
#include <linux/types.h>