Diffstat (limited to 'include/crypto/internal')
-rw-r--r-- include/crypto/internal/acompress.h  | 177
-rw-r--r-- include/crypto/internal/akcipher.h   |   4
-rw-r--r-- include/crypto/internal/blake2b.h    |  92
-rw-r--r-- include/crypto/internal/blockhash.h  |  52
-rw-r--r-- include/crypto/internal/chacha.h     |  43
-rw-r--r-- include/crypto/internal/cryptouser.h |  16
-rw-r--r-- include/crypto/internal/ecc.h        |  35
-rw-r--r-- include/crypto/internal/engine.h     |   5
-rw-r--r-- include/crypto/internal/geniv.h      |   1
-rw-r--r-- include/crypto/internal/hash.h       | 137
-rw-r--r-- include/crypto/internal/poly1305.h   |  28
-rw-r--r-- include/crypto/internal/rsa.h        |  29
-rw-r--r-- include/crypto/internal/scompress.h  |  30
-rw-r--r-- include/crypto/internal/sha2.h       |  66
-rw-r--r-- include/crypto/internal/sig.h        |  80
-rw-r--r-- include/crypto/internal/simd.h       |  22
-rw-r--r-- include/crypto/internal/skcipher.h   |  59
17 files changed, 625 insertions(+), 251 deletions(-)
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 4ac46bafba9d..ffffd88bbbad 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -11,13 +11,23 @@ #include <crypto/acompress.h> #include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/compiler_types.h> +#include <linux/cpumask_types.h> +#include <linux/spinlock.h> +#include <linux/workqueue_types.h> + +#define ACOMP_FBREQ_ON_STACK(name, req) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_fbreq_on_stack_init( \ + __##name##_req, (req)) /** * struct acomp_alg - asynchronous compression algorithm * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the algorithm * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object. This function is called only once at @@ -30,26 +40,70 @@ * counterpart to @init, used to remove various changes set in * @init. * - * @reqsize: Context size for (de)compression requests - * @stat: Statistics for compress algorithm * @base: Common crypto API algorithm data structure * @calg: Cmonn algorithm data structure shared with scomp */ struct acomp_alg { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct scatterlist *dst); int (*init)(struct crypto_acomp *tfm); void (*exit)(struct crypto_acomp *tfm); - unsigned int reqsize; - union { struct COMP_ALG_COMMON; struct comp_alg_common calg; }; }; +struct crypto_acomp_stream { + spinlock_t lock; + void *ctx; +}; + +struct crypto_acomp_streams { + /* These must come first because of struct scomp_alg. */ + void *(*alloc_ctx)(void); + union { + void (*free_ctx)(void *); + void (*cfree_ctx)(const void *); + }; + + struct crypto_acomp_stream __percpu *streams; + struct work_struct stream_work; + cpumask_t stream_want; +}; + +struct acomp_walk { + union { + /* Virtual address of the source. */ + struct { + struct { + const void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; + + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. */ + struct scatter_walk out; + }; + + unsigned int slen; + unsigned int dlen; + + int flags; +}; + /* * Transform internal helpers. 
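[Editor's sketch] Before the remaining acompress hunks, here is a minimal sketch of how a driver might consume the new acomp_walk API added by this patch. The walk helpers (acomp_walk_virt(), acomp_walk_next_src(), acomp_walk_done_src() and their dst counterparts) are declared further down in this diff; my_copy_walk() and the byte-for-byte copy standing in for real (de)compression are illustrative assumptions, not part of the patch.

#include <crypto/internal/acompress.h>
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/string.h>

/* Hypothetical driver loop: walk source and destination in tandem. */
static int my_copy_walk(struct acomp_req *req)
{
	struct acomp_walk walk;
	int ret;

	ret = acomp_walk_virt(&walk, req, true);
	if (ret)
		return ret;

	while ((ret = acomp_walk_next_src(&walk)) > 0) {
		int dcur = acomp_walk_next_dst(&walk);
		int n = min(ret, dcur);

		if (dcur <= 0)
			return -ENOSPC;

		/* Both sides are plain virtual addresses at this point. */
		memcpy(walk.dst.virt.addr, walk.src.virt.addr, n);
		acomp_walk_done_dst(&walk, n);
		acomp_walk_done_src(&walk, n);
	}

	return ret;
}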
*/ @@ -69,21 +123,6 @@ static inline void acomp_request_complete(struct acomp_req *req, crypto_request_complete(&req->base, err); } -static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) -{ - struct acomp_req *req; - - req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); - if (likely(req)) - acomp_request_set_tfm(req, tfm); - return req; -} - -static inline void __acomp_request_free(struct acomp_req *req) -{ - kfree_sensitive(req); -} - /** * crypto_register_acomp() -- Register asynchronous compression algorithm * @@ -109,4 +148,100 @@ void crypto_unregister_acomp(struct acomp_alg *alg); int crypto_register_acomps(struct acomp_alg *algs, int count); void crypto_unregister_acomps(struct acomp_alg *algs, int count); +static inline bool acomp_request_issg(struct acomp_req *req) +{ + return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT)); +} + +static inline bool acomp_request_src_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +static inline bool acomp_request_dst_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT; +} + +static inline bool acomp_request_isvirt(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT); +} + +static inline bool acomp_request_src_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA; +} + +static inline bool acomp_request_dst_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA; +} + +static inline bool acomp_request_isnondma(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_NONDMA); +} + +static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm) +{ + return crypto_tfm_req_virt(&tfm->base); +} + +void crypto_acomp_free_streams(struct crypto_acomp_streams *s); +int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s); + +struct crypto_acomp_stream *crypto_acomp_lock_stream_bh( + struct crypto_acomp_streams *s) __acquires(stream); + +static inline void crypto_acomp_unlock_stream_bh( + struct crypto_acomp_stream *stream) __releases(stream) +{ + spin_unlock_bh(&stream->lock); +} + +void acomp_walk_done_src(struct acomp_walk *walk, int used); +void acomp_walk_done_dst(struct acomp_walk *walk, int used); +int acomp_walk_next_src(struct acomp_walk *walk); +int acomp_walk_next_dst(struct acomp_walk *walk); +int acomp_walk_virt(struct acomp_walk *__restrict walk, + struct acomp_req *__restrict req, bool atomic); + +static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur) +{ + return walk->slen != cur; +} + +static inline u32 acomp_request_flags(struct acomp_req *req) +{ + return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE; +} + +static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm) +{ + return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb); +} + +static inline struct acomp_req *acomp_fbreq_on_stack_init( + char *buf, struct acomp_req *old) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(old); + struct acomp_req *req = (void *)buf; + + crypto_stack_request_init(&req->base, + crypto_acomp_tfm(crypto_acomp_fb(tfm))); + acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL); + req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE; + req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE; + req->src = old->src; + req->dst = old->dst; + req->slen = 
old->slen; + req->dlen = old->dlen; + + return req; +} + #endif diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index a0fba4b2eccf..14ee62bc52b6 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -124,7 +124,7 @@ static inline struct akcipher_alg *crypto_spawn_akcipher_alg( /** * crypto_register_akcipher() -- Register public key algorithm * - * Function registers an implementation of a public key verify algorithm + * Function registers an implementation of a public key cipher algorithm * * @alg: algorithm definition * @@ -135,7 +135,7 @@ int crypto_register_akcipher(struct akcipher_alg *alg); /** * crypto_unregister_akcipher() -- Unregister public key algorithm * - * Function unregisters an implementation of a public key verify algorithm + * Function unregisters an implementation of a public key cipher algorithm * * @alg: algorithm definition */ diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h index 982fe5e8471c..3e09e2485306 100644 --- a/include/crypto/internal/blake2b.h +++ b/include/crypto/internal/blake2b.h @@ -7,65 +7,36 @@ #ifndef _CRYPTO_INTERNAL_BLAKE2B_H #define _CRYPTO_INTERNAL_BLAKE2B_H +#include <asm/byteorder.h> #include <crypto/blake2b.h> #include <crypto/internal/hash.h> +#include <linux/array_size.h> +#include <linux/compiler.h> +#include <linux/build_bug.h> +#include <linux/errno.h> +#include <linux/math.h> #include <linux/string.h> - -void blake2b_compress_generic(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc); +#include <linux/types.h> static inline void blake2b_set_lastblock(struct blake2b_state *state) { state->f[0] = -1; + state->f[1] = 0; } -typedef void (*blake2b_compress_t)(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc); - -static inline void __blake2b_update(struct blake2b_state *state, - const u8 *in, size_t inlen, - blake2b_compress_t compress) +static inline void blake2b_set_nonlast(struct blake2b_state *state) { - const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > BLAKE2B_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); - in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; + state->f[0] = 0; + state->f[1] = 0; } -static inline void __blake2b_final(struct blake2b_state *state, u8 *out, - blake2b_compress_t compress) -{ - int i; - - blake2b_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */ - (*compress)(state, state->buf, 1, state->buflen); - for (i = 0; i < ARRAY_SIZE(state->h); i++) - __cpu_to_le64s(&state->h[i]); - memcpy(out, state->h, state->outlen); -} +typedef void (*blake2b_compress_t)(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); /* Helper functions for shash implementations of BLAKE2b */ struct blake2b_tfm_ctx { - u8 key[BLAKE2B_KEY_SIZE]; + u8 key[BLAKE2B_BLOCK_SIZE]; unsigned int keylen; }; @@ -74,10 +45,13 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, { struct blake2b_tfm_ctx *tctx 
= crypto_shash_ctx(tfm); - if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE) + if (keylen > BLAKE2B_KEY_SIZE) return -EINVAL; + BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE); + memcpy(tctx->key, key, keylen); + memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen); tctx->keylen = keylen; return 0; @@ -89,26 +63,38 @@ static inline int crypto_blake2b_init(struct shash_desc *desc) struct blake2b_state *state = shash_desc_ctx(desc); unsigned int outlen = crypto_shash_digestsize(desc->tfm); - __blake2b_init(state, outlen, tctx->key, tctx->keylen); - return 0; + __blake2b_init(state, outlen, tctx->keylen); + return tctx->keylen ? + crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0; } -static inline int crypto_blake2b_update(struct shash_desc *desc, - const u8 *in, unsigned int inlen, - blake2b_compress_t compress) +static inline int crypto_blake2b_update_bo(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2b_compress_t compress) { struct blake2b_state *state = shash_desc_ctx(desc); - __blake2b_update(state, in, inlen, compress); - return 0; + blake2b_set_nonlast(state); + compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE); + return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE); } -static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out, +static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in, + unsigned int inlen, u8 *out, blake2b_compress_t compress) { struct blake2b_state *state = shash_desc_ctx(desc); + u8 buf[BLAKE2B_BLOCK_SIZE]; + int i; - __blake2b_final(state, out, compress); + memcpy(buf, in, inlen); + memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen); + blake2b_set_lastblock(state); + compress(state, buf, 1, inlen); + for (i = 0; i < ARRAY_SIZE(state->h); i++) + __cpu_to_le64s(&state->h[i]); + memcpy(out, state->h, crypto_shash_digestsize(desc->tfm)); + memzero_explicit(buf, sizeof(buf)); return 0; } diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h new file mode 100644 index 000000000000..52d9d4c82493 --- /dev/null +++ b/include/crypto/internal/blockhash.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Handle partial blocks for block hash. 
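[Editor's sketch] To make the role of the BLAKE2b helpers above concrete: the compress callback does all block processing, crypto_blake2b_update_bo() returns the count of leftover partial-block bytes for the core to buffer, and crypto_blake2b_finup() pads and finalizes the last block. my_blake2b_compress() stands in for a real arch or generic compress function and is assumed, not defined by this patch.

#include <crypto/internal/blake2b.h>

/* Assumed to be provided elsewhere: the actual compression function. */
static void my_blake2b_compress(struct blake2b_state *state,
				const u8 *block, size_t nblocks, u32 inc);

static int my_blake2b_update(struct shash_desc *desc, const u8 *in,
			     unsigned int inlen)
{
	/* Returns the number of leftover bytes (less than one block). */
	return crypto_blake2b_update_bo(desc, in, inlen, my_blake2b_compress);
}

static int my_blake2b_finup(struct shash_desc *desc, const u8 *in,
			    unsigned int inlen, u8 *out)
{
	return crypto_blake2b_finup(desc, in, inlen, out, my_blake2b_compress);
}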
+ * + * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H +#define _CRYPTO_INTERNAL_BLOCKHASH_H + +#include <linux/string.h> +#include <linux/types.h> + +#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \ + buf, buflen) \ + ({ \ + typeof(block_fn) *_block_fn = &(block_fn); \ + typeof(state + 0) _state = (state); \ + unsigned int _buflen = (buflen); \ + size_t _nbytes = (nbytes); \ + unsigned int _bs = (bs); \ + const u8 *_src = (src); \ + u8 *_buf = (buf); \ + while ((_buflen + _nbytes) >= _bs) { \ + const u8 *data = _src; \ + size_t len = _nbytes; \ + size_t blocks; \ + int remain; \ + if (_buflen) { \ + remain = _bs - _buflen; \ + memcpy(_buf + _buflen, _src, remain); \ + data = _buf; \ + len = _bs; \ + } \ + remain = len % bs; \ + blocks = (len - remain) / (dv); \ + (*_block_fn)(_state, data, blocks); \ + _src += len - remain - _buflen; \ + _nbytes -= len - remain - _buflen; \ + _buflen = 0; \ + } \ + memcpy(_buf + _buflen, _src, _nbytes); \ + _buflen += _nbytes; \ + }) + +#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \ + BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen) +#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \ + BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen) + +#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */ diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h deleted file mode 100644 index b085dc1ac151..000000000000 --- a/include/crypto/internal/chacha.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef _CRYPTO_INTERNAL_CHACHA_H -#define _CRYPTO_INTERNAL_CHACHA_H - -#include <crypto/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/crypto.h> - -struct chacha_ctx { - u32 key[8]; - int nrounds; -}; - -static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize, int nrounds) -{ - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - int i; - - if (keysize != CHACHA_KEY_SIZE) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(ctx->key); i++) - ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); - - ctx->nrounds = nrounds; - return 0; -} - -static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) -{ - return chacha_setkey(tfm, key, keysize, 20); -} - -static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) -{ - return chacha_setkey(tfm, key, keysize, 12); -} - -#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h deleted file mode 100644 index fd54074332f5..000000000000 --- a/include/crypto/internal/cryptouser.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/cryptouser.h> -#include <net/netlink.h> - -struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); - -#ifdef CONFIG_CRYPTO_STATS -int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); -#else -static inline int crypto_reportstat(struct sk_buff *in_skb, - struct nlmsghdr *in_nlh, - struct nlattr **attrs) -{ - return -ENOTSUPP; -} -#endif diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h index 4f6c1a68882f..57cd75242141 100644 --- a/include/crypto/internal/ecc.h +++ b/include/crypto/internal/ecc.h @@ -27,13 
+27,14 @@ #define _CRYPTO_ECC_H #include <crypto/ecc_curve.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> /* One digit is u64 qword. */ #define ECC_CURVE_NIST_P192_DIGITS 3 #define ECC_CURVE_NIST_P256_DIGITS 4 #define ECC_CURVE_NIST_P384_DIGITS 6 -#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */ +#define ECC_CURVE_NIST_P521_DIGITS 9 +#define ECC_MAX_DIGITS DIV_ROUND_UP(521, 64) /* NIST P521 */ #define ECC_DIGITS_TO_BYTES_SHIFT 3 @@ -41,6 +42,18 @@ #define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } +/* + * The integers r and s making up the signature are expected to be + * formatted as two consecutive u64 arrays of size ECC_MAX_BYTES. + * The bytes within each u64 digit are in native endianness, + * but the order of the u64 digits themselves is little endian. + * This format allows direct use by internal vli_*() functions. + */ +struct ecdsa_raw_sig { + u64 r[ECC_MAX_DIGITS]; + u64 s[ECC_MAX_DIGITS]; +}; + /** * ecc_swap_digits() - Copy ndigits from big endian array to native array * @in: Input array @@ -57,6 +70,19 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit } /** + * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array + * @in: Input byte array + * @nbytes Size of input byte array + * @out Output digits array + * @ndigits: Number of digits to create from byte array + * + * The first byte in the input byte array is expected to hold the most + * significant bits of the large integer. + */ +void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, + u64 *out, unsigned int ndigits); + +/** * ecc_is_key_valid() - Validate a given ECDH private key * * @curve_id: id representing the curve to use @@ -81,7 +107,8 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, * Returns 0 if the private key was generated successfully, a negative value * if an error occurred. 
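[Editor's sketch] The intended use of struct ecdsa_raw_sig and ecc_digits_from_bytes() documented above: unpack an r||s signature, where each half is a big-endian byte string, into the digit format the vli_*() routines consume. load_raw_sig() is an illustrative name only.

#include <crypto/internal/ecc.h>

static void load_raw_sig(struct ecdsa_raw_sig *sig, const u8 *rs,
			 unsigned int nbytes, unsigned int ndigits)
{
	/* r comes first on the wire, s follows; both are big-endian. */
	ecc_digits_from_bytes(rs, nbytes, sig->r, ndigits);
	ecc_digits_from_bytes(rs + nbytes, nbytes, sig->s, ndigits);
}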
*/ -int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey); +int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, + u64 *private_key); /** * ecc_make_pub_key() - Compute an ECC public key @@ -278,4 +305,6 @@ void ecc_point_mult_shamir(const struct ecc_point *result, const u64 *y, const struct ecc_point *q, const struct ecc_curve *curve); +extern struct crypto_template ecdsa_x962_tmpl; +extern struct crypto_template ecdsa_p1363_tmpl; #endif diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h index fbf4be56cf12..b6a4ea2240fc 100644 --- a/include/crypto/internal/engine.h +++ b/include/crypto/internal/engine.h @@ -27,10 +27,10 @@ struct device; * @retry_support: indication that the hardware allows re-execution * of a failed backlog request * crypto-engine, in head position to keep order + * @rt: whether this queue is set to run as a realtime task * @list: link with the global crypto engine list * @queue_lock: spinlock to synchronise access to request queue * @queue: the crypto queue of the engine - * @rt: whether this queue is set to run as a realtime task * @prepare_crypt_hardware: a request will soon arrive from the queue * so the subsystem requests the driver to prepare the hardware * by issuing this call @@ -51,14 +51,13 @@ struct crypto_engine { bool running; bool retry_support; + bool rt; struct list_head list; spinlock_t queue_lock; struct crypto_queue queue; struct device *dev; - bool rt; - int (*prepare_crypt_hardware)(struct crypto_engine *engine); int (*unprepare_crypt_hardware)(struct crypto_engine *engine); int (*do_batch_requests)(struct crypto_engine *engine); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 7fd7126f593a..012f5fb22d43 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -15,7 +15,6 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; - struct crypto_sync_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 58967593b6b4..0f85c543f80b 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -11,21 +11,25 @@ #include <crypto/algapi.h> #include <crypto/hash.h> -struct ahash_request; -struct scatterlist; +/* Set this bit to handle partial blocks in the API. */ +#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000 -struct crypto_hash_walk { - char *data; +/* Set this bit if final requires at least one byte. */ +#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000 - unsigned int offset; - unsigned int flags; +/* Set this bit if finup can deal with multiple blocks. */ +#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000 - struct page *pg; - unsigned int entrylen; +/* This bit is set by the Crypto API if export_core is not supported. 
*/ +#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000 - unsigned int total; - struct scatterlist *sg; -}; +#define HASH_FBREQ_ON_STACK(name, req) \ + char __##name##_req[sizeof(struct ahash_request) + \ + MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = ahash_fbreq_on_stack_init( \ + __##name##_req, (req)) + +struct ahash_request; struct ahash_instance { void (*free)(struct ahash_instance *inst); @@ -57,21 +61,13 @@ struct crypto_shash_spawn { struct crypto_spawn base; }; -int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); -int crypto_hash_walk_first(struct ahash_request *req, - struct crypto_hash_walk *walk); - -static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) -{ - return !(walk->entrylen | walk->total); -} - int crypto_register_ahash(struct ahash_alg *alg); void crypto_unregister_ahash(struct ahash_alg *alg); int crypto_register_ahashes(struct ahash_alg *algs, int count); void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); +void ahash_free_singlespawn_instance(struct ahash_instance *inst); int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); @@ -81,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) { return crypto_shash_alg_has_setkey(alg) && !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); } +static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg) +{ + return crypto_hash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); +} + int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); @@ -210,7 +214,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err) static inline u32 ahash_request_flags(struct ahash_request *req) { - return req->base.flags; + return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE; } static inline struct crypto_ahash *crypto_spawn_ahash( @@ -270,5 +274,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_shash, base); } +static inline bool ahash_request_isvirt(struct ahash_request *req) +{ + return req->base.flags & CRYPTO_AHASH_REQ_VIRT; +} + +static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm) +{ + return crypto_tfm_req_virt(&tfm->base); +} + +static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm) +{ + return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb); +} + +static inline struct ahash_request *ahash_fbreq_on_stack_init( + char *buf, struct ahash_request *old) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(old); + struct ahash_request *req = (void *)buf; + + crypto_stack_request_init(&req->base, + crypto_ahash_tfm(crypto_ahash_fb(tfm))); + ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL); + req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE; + req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE; + req->src = old->src; + req->result = old->result; + req->nbytes = old->nbytes; + + return req; +} + +/* Return the state size without partial block for block-only algorithms. 
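[Editor's sketch] The on-stack fallback request machinery above is meant for drivers that must bail out to the tfm's synchronous software fallback. A minimal sketch, assuming a hypothetical my_can_handle() predicate in front of the usual hardware path:

#include <crypto/internal/hash.h>

static bool my_can_handle(struct ahash_request *req); /* assumption */

static int my_ahash_digest(struct ahash_request *req)
{
	if (!my_can_handle(req)) {
		HASH_FBREQ_ON_STACK(fbreq, req);

		return crypto_ahash_digest(fbreq);
	}

	/* ... hardware path ... */
	return 0;
}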
*/ +static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm) +{ + return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1; +} + +/* This can only be used if the request was never cloned. */ +#define HASH_REQUEST_ZERO(name) \ + memzero_explicit(__##name##_req, sizeof(__##name##_req)) + +/** + * crypto_ahash_export_core() - extract core state for message digest + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * Export the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +int crypto_ahash_export_core(struct ahash_request *req, void *out); + +/** + * crypto_ahash_import_core() - import core state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * Import the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +int crypto_ahash_import_core(struct ahash_request *req, const void *in); + +/** + * crypto_shash_export_core() - extract core state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * Export the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +int crypto_shash_export_core(struct shash_desc *desc, void *out); + +/** + * crypto_shash_import_core() - import core state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * Import the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +int crypto_shash_import_core(struct shash_desc *desc, const void *in); + #endif /* _CRYPTO_INTERNAL_HASH_H */ diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h index 196aa769f296..c60315f47562 100644 --- a/include/crypto/internal/poly1305.h +++ b/include/crypto/internal/poly1305.h @@ -6,9 +6,8 @@ #ifndef _CRYPTO_INTERNAL_POLY1305_H #define _CRYPTO_INTERNAL_POLY1305_H -#include <asm/unaligned.h> -#include <linux/types.h> #include <crypto/poly1305.h> +#include <linux/types.h> /* * Poly1305 core functions. 
These only accept whole blocks; the caller must @@ -31,4 +30,29 @@ void poly1305_core_blocks(struct poly1305_state *state, void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], void *dst); +void poly1305_block_init_arch(struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +void poly1305_block_init_generic(struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src, + unsigned int len, u32 padbit); + +static inline void poly1305_blocks_generic(struct poly1305_block_state *state, + const u8 *src, unsigned int len, + u32 padbit) +{ + poly1305_core_blocks(&state->h, &state->core_r, src, + len / POLY1305_BLOCK_SIZE, padbit); +} + +void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]); + +static inline void poly1305_emit_generic(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]) +{ + poly1305_core_emit(state, nonce, digest); +} + #endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index e870133f4b77..071a1951b992 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -8,6 +8,7 @@ #ifndef _RSA_HELPER_ #define _RSA_HELPER_ #include <linux/types.h> +#include <crypto/akcipher.h> /** * rsa_key - RSA key structure @@ -53,5 +54,33 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); +#define RSA_PUB (true) +#define RSA_PRIV (false) + +static inline int rsa_set_key(struct crypto_akcipher *child, + unsigned int *key_size, bool is_pubkey, + const void *key, unsigned int keylen) +{ + int err; + + *key_size = 0; + + if (is_pubkey) + err = crypto_akcipher_set_pub_key(child, key, keylen); + else + err = crypto_akcipher_set_priv_key(child, key, keylen); + if (err) + return err; + + /* Find out new modulus size from rsa implementation */ + err = crypto_akcipher_maxsize(child); + if (err > PAGE_SIZE) + return -ENOTSUPP; + + *key_size = err; + return 0; +} + extern struct crypto_template rsa_pkcs1pad_tmpl; +extern struct crypto_template rsassa_pkcs1_tmpl; #endif diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 858fe3965ae3..533d6c16a491 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -9,12 +9,7 @@ #ifndef _CRYPTO_SCOMP_INT_H #define _CRYPTO_SCOMP_INT_H -#include <crypto/acompress.h> -#include <crypto/algapi.h> - -#define SCOMP_SCRATCH_SIZE 131072 - -struct acomp_req; +#include <crypto/internal/acompress.h> struct crypto_scomp { struct crypto_tfm base; @@ -27,13 +22,11 @@ struct crypto_scomp { * @free_ctx: Function frees context allocated with alloc_ctx * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @stat: Statistics for compress algorithm * @base: Common crypto API algorithm data structure + * @streams: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with acomp */ struct scomp_alg { - void *(*alloc_ctx)(struct crypto_scomp *tfm); - void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); int (*compress)(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); @@ -42,6 +35,14 @@ struct scomp_alg { void *ctx); union { + struct { + void *(*alloc_ctx)(void); + void (*free_ctx)(void *ctx); + }; + 
struct crypto_acomp_streams streams; + }; + + union { struct COMP_ALG_COMMON; struct comp_alg_common calg; }; @@ -72,17 +73,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); } -static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) -{ - return crypto_scomp_alg(tfm)->alloc_ctx(tfm); -} - -static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, - void *ctx) -{ - return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); -} - static inline int crypto_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h new file mode 100644 index 000000000000..21a27fd5e198 --- /dev/null +++ b/include/crypto/internal/sha2.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _CRYPTO_INTERNAL_SHA2_H +#define _CRYPTO_INTERNAL_SHA2_H + +#include <crypto/internal/simd.h> +#include <crypto/sha2.h> +#include <linux/compiler_attributes.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/unaligned.h> + +#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) +bool sha256_is_arch_optimized(void); +#else +static inline bool sha256_is_arch_optimized(void) +{ + return false; +} +#endif +void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +static __always_inline void sha256_choose_blocks( + u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks, + bool force_generic, bool force_simd) +{ + if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic) + sha256_blocks_generic(state, data, nblocks); + else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) && + (force_simd || crypto_simd_usable())) + sha256_blocks_simd(state, data, nblocks); + else + sha256_blocks_arch(state, data, nblocks); +} + +static __always_inline void sha256_finup( + struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE], + size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size, + bool force_generic, bool force_simd) +{ + const size_t bit_offset = SHA256_BLOCK_SIZE - 8; + __be64 *bits = (__be64 *)&buf[bit_offset]; + int i; + + buf[len++] = 0x80; + if (len > bit_offset) { + memset(&buf[len], 0, SHA256_BLOCK_SIZE - len); + sha256_choose_blocks(sctx->state, buf, 1, force_generic, + force_simd); + len = 0; + } + + memset(&buf[len], 0, bit_offset - len); + *bits = cpu_to_be64(sctx->count << 3); + sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd); + + for (i = 0; i < digest_size; i += 4) + put_unaligned_be32(sctx->state[i / 4], out + i); +} + +#endif /* _CRYPTO_INTERNAL_SHA2_H */ diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h index 97cb26ef8115..b16648c1a986 100644 --- a/include/crypto/internal/sig.h +++ b/include/crypto/internal/sig.h @@ -10,8 +10,88 @@ #include <crypto/algapi.h> #include <crypto/sig.h> +struct sig_instance { + void (*free)(struct sig_instance *inst); + union { + struct { + char head[offsetof(struct sig_alg, base)]; + struct crypto_instance base; + }; + struct sig_alg alg; + }; +}; + +struct crypto_sig_spawn { + struct crypto_spawn base; +}; + static inline void *crypto_sig_ctx(struct crypto_sig *tfm) { return crypto_tfm_ctx(&tfm->base); } + +/** + * 
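[Editor's sketch] Stepping back to the new internal/sha2.h above: a sketch of finalizing a short message (less than one block) with sha256_finup(), seeding the initial state from the SHA256_H* constants in <crypto/sha2.h>. digest_short() is illustrative only; real users keep a running crypto_sha256_state across updates.

#include <crypto/internal/sha2.h>

static void digest_short(const u8 *msg, size_t len,
			 u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_sha256_state sctx = {
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
		.count = len,		/* total bytes hashed so far */
	};
	u8 buf[SHA256_BLOCK_SIZE];

	memcpy(buf, msg, len);
	sha256_finup(&sctx, buf, len, out, SHA256_DIGEST_SIZE,
		     false, false);	/* no forced generic/SIMD path */
}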
crypto_register_sig() -- Register public key signature algorithm + * + * Function registers an implementation of a public key signature algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_sig(struct sig_alg *alg); + +/** + * crypto_unregister_sig() -- Unregister public key signature algorithm + * + * Function unregisters an implementation of a public key signature algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_sig(struct sig_alg *alg); + +int sig_register_instance(struct crypto_template *tmpl, + struct sig_instance *inst); + +static inline struct sig_instance *sig_instance(struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct sig_instance, alg.base); +} + +static inline struct sig_instance *sig_alg_instance(struct crypto_sig *tfm) +{ + return sig_instance(crypto_tfm_alg_instance(&tfm->base)); +} + +static inline struct crypto_instance *sig_crypto_instance(struct sig_instance + *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline void *sig_instance_ctx(struct sig_instance *inst) +{ + return crypto_instance_ctx(sig_crypto_instance(inst)); +} + +int crypto_grab_sig(struct crypto_sig_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline struct crypto_sig *crypto_spawn_sig(struct crypto_sig_spawn + *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_sig(struct crypto_sig_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct sig_alg *crypto_spawn_sig_alg(struct crypto_sig_spawn + *spawn) +{ + return container_of(spawn->base.alg, struct sig_alg, base); +} #endif diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h index d2316242a988..9e338e7aafbd 100644 --- a/include/crypto/internal/simd.h +++ b/include/crypto/internal/simd.h @@ -6,6 +6,7 @@ #ifndef _CRYPTO_INTERNAL_SIMD_H #define _CRYPTO_INTERNAL_SIMD_H +#include <asm/simd.h> #include <linux/percpu.h> #include <linux/types.h> @@ -14,11 +15,10 @@ struct simd_skcipher_alg; struct skcipher_alg; -struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, + const char *algname, const char *drvname, const char *basename); -struct simd_skcipher_alg *simd_skcipher_create(const char *algname, - const char *basename); void simd_skcipher_free(struct simd_skcipher_alg *alg); int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, @@ -32,13 +32,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count, struct simd_aead_alg; struct aead_alg; -struct simd_aead_alg *simd_aead_create_compat(const char *algname, - const char *drvname, - const char *basename); -struct simd_aead_alg *simd_aead_create(const char *algname, - const char *basename); -void simd_aead_free(struct simd_aead_alg *alg); - int simd_register_aeads_compat(struct aead_alg *algs, int count, struct simd_aead_alg **simd_algs); @@ -52,13 +45,10 @@ void simd_unregister_aeads(struct aead_alg *algs, int count, * This delegates to may_use_simd(), except that this also returns false if SIMD * in crypto code has been temporarily disabled on this CPU by the crypto * self-tests, in order to test the no-SIMD fallback code. 
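[Editor's sketch] For context on the simd.h comment being edited here, crypto_simd_usable() is typically consumed as below; my_hash_simd()/my_hash_generic() are illustrative stubs, and kernel_fpu_begin()/kernel_fpu_end() is the x86 spelling of the SIMD-section guard (other architectures use their own).

#include <asm/fpu/api.h>	/* x86: kernel_fpu_begin()/kernel_fpu_end() */
#include <crypto/internal/simd.h>

/* Assumed stubs for illustration. */
static void my_hash_simd(u32 *state, const u8 *data, size_t nblocks);
static void my_hash_generic(u32 *state, const u8 *data, size_t nblocks);

static void my_hash_blocks(u32 *state, const u8 *data, size_t nblocks)
{
	if (crypto_simd_usable()) {
		kernel_fpu_begin();
		my_hash_simd(state, data, nblocks);
		kernel_fpu_end();
	} else {
		my_hash_generic(state, data, nblocks);
	}
}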
This override is - * currently limited to configurations where the extra self-tests are enabled, - * because it might be a bit too invasive to be part of the regular self-tests. - * - * This is a macro so that <asm/simd.h>, which some architectures don't have, - * doesn't have to be included directly here. + * currently limited to configurations where the "full" self-tests are enabled, + * because it might be a bit too invasive to be part of the "fast" self-tests. */ -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +#ifdef CONFIG_CRYPTO_SELFTESTS_FULL DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test); #define crypto_simd_usable() \ (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test)) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 7ae42afdcf3e..d5aa535263f6 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -10,8 +10,8 @@ #include <crypto/algapi.h> #include <crypto/internal/cipher.h> +#include <crypto/scatterwalk.h> #include <crypto/skcipher.h> -#include <linux/list.h> #include <linux/types.h> /* @@ -55,40 +55,6 @@ struct crypto_lskcipher_spawn { struct crypto_spawn base; }; -struct skcipher_walk { - union { - struct { - struct page *page; - unsigned long offset; - } phys; - - struct { - u8 *page; - void *addr; - } virt; - } src, dst; - - struct scatter_walk in; - unsigned int nbytes; - - struct scatter_walk out; - unsigned int total; - - struct list_head buffers; - - u8 *page; - u8 *buffer; - u8 *oiv; - void *iv; - - unsigned int ivsize; - - int flags; - unsigned int blocksize; - unsigned int stride; - unsigned int alignmask; -}; - static inline struct crypto_instance *skcipher_crypto_instance( struct skcipher_instance *inst) { @@ -205,22 +171,15 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); -int skcipher_walk_done(struct skcipher_walk *walk, int err); -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic); -int skcipher_walk_async(struct skcipher_walk *walk, - struct skcipher_request *req); -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -void skcipher_walk_complete(struct skcipher_walk *walk, int err); - -static inline void skcipher_walk_abort(struct skcipher_walk *walk) -{ - skcipher_walk_done(walk, -ECANCELED); -} +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) { |
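[Editor's sketch] Finally, the skcipher walk API that the last hunk trims down to skcipher_walk_virt() plus the AEAD variants keeps its classic driver-side shape. A sketch, assuming skcipher_walk_done() remains reachable through <crypto/scatterwalk.h> (now included above) and with my_cipher_chunk() as an illustrative stub:

#include <crypto/internal/skcipher.h>

/* Assumed stub: processes n bytes in place of a real cipher. */
static void my_cipher_chunk(u8 *dst, const u8 *src, unsigned int len, u8 *iv);

static int my_skcipher_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int n = walk.nbytes;

		my_cipher_chunk(walk.dst.virt.addr, walk.src.virt.addr,
				n, walk.iv);
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}

	return err;
}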