author     Herbert Xu <herbert@gondor.apana.org.au>   2025-05-02 13:30:53 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>   2025-05-05 18:20:45 +0800
commit     5b90a779bc547939421bfeb333e470658ba94fb6 (patch)
tree       bb8750b51d4f0f133884e002dae5313d9c763612 /lib/crypto
parent     7d2461c7616743d62be0df8f9a5f4a6de29f119a (diff)
crypto: lib/sha256 - Add helpers for block-based shash
Add an internal sha256_finup helper and move the finalisation code from
__sha256_final into it.

Also add sha256_choose_blocks and CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD so that
the Crypto API can use the SIMD block function unconditionally.  The Crypto
API must not be used in hard IRQs and there is no reason to have a fallback
path for hardirqs.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
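For reference, sha256_choose_blocks() itself is defined on the header side of
this series and does not appear in the diff below.  Judging from the new call
sites, it is a dispatcher roughly along these lines (a sketch only: the
sha256_blocks_simd() entry point and the crypto_simd_usable() runtime check
are assumptions about the header-side code, not taken from this patch):

	/* Sketch of the implied dispatcher; not part of this diff. */
	static __always_inline void sha256_choose_blocks(
		u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
		bool force_generic, bool force_simd)
	{
		/* No arch implementation, or the caller explicitly wants generic. */
		if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
			sha256_blocks_generic(state, data, nblocks);
		/* SIMD path: callers that never run in hardirq may force it. */
		else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
			 (force_simd || crypto_simd_usable()))
			sha256_blocks_simd(state, data, nblocks);
		else
			sha256_blocks_arch(state, data, nblocks);
	}

With a dispatcher like this, the lib/crypto callers below pass false for the
last argument and rely on the runtime check, while the Crypto API (shash)
side can pass true because, as the commit message notes, it is never invoked
from hardirq context.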
Diffstat (limited to 'lib/crypto')
-rw-r--r--	lib/crypto/Kconfig	8
-rw-r--r--	lib/crypto/sha256.c	32
2 files changed, 17 insertions(+), 23 deletions(-)
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 6319358b38c2..1ec1466108cc 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -150,6 +150,14 @@ config CRYPTO_ARCH_HAVE_LIB_SHA256
 	  Declares whether the architecture provides an arch-specific
 	  accelerated implementation of the SHA-256 library interface.
 
+config CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
+	bool
+	help
+	  Declares whether the architecture provides an arch-specific
+	  accelerated implementation of the SHA-256 library interface
+	  that is SIMD-based and therefore not usable in hardirq
+	  context.
+
 config CRYPTO_LIB_SHA256_GENERIC
 	tristate
 	default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256
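As background for the hardirq restriction in the new help text: a SIMD-based
arch implementation has to bracket its vector code with the kernel-mode SIMD
begin/end helpers, which may not be used in hardirq context.  A hypothetical
arm64-style glue (names are placeholders, not part of this patch) would look
like:

	/* Hypothetical arch glue for a SIMD-based SHA-256 block function. */
	void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], const u8 *data,
				size_t nblocks)
	{
		kernel_neon_begin();	/* not allowed from hardirq context */
		sha256_block_neon(state, data, nblocks);	/* assumed asm entry point */
		kernel_neon_end();
	}

An architecture whose accelerated implementation works this way would select
CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD alongside CRYPTO_ARCH_HAVE_LIB_SHA256.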
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 563f09c9f381..2ced29efa181 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/unaligned.h>
 
 /*
  * If __DISABLE_EXPORTS is defined, then this file is being compiled for a
@@ -26,14 +25,16 @@
#include "sha256-generic.c"
#endif
+static inline bool sha256_purgatory(void)
+{
+ return __is_defined(__DISABLE_EXPORTS);
+}
+
static inline void sha256_blocks(u32 state[SHA256_STATE_WORDS], const u8 *data,
size_t nblocks, bool force_generic)
{
-#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) && !defined(__DISABLE_EXPORTS)
- if (!force_generic)
- return sha256_blocks_arch(state, data, nblocks);
-#endif
- sha256_blocks_generic(state, data, nblocks);
+ sha256_choose_blocks(state, data, nblocks,
+ force_generic || sha256_purgatory(), false);
}
static inline void __sha256_update(struct sha256_state *sctx, const u8 *data,
@@ -79,25 +80,10 @@ EXPORT_SYMBOL(sha256_update);
 static inline void __sha256_final(struct sha256_state *sctx, u8 *out,
 				  size_t digest_size, bool force_generic)
 {
-	const size_t bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
-	__be64 *bits = (__be64 *)&sctx->buf[bit_offset];
 	size_t partial = sctx->count % SHA256_BLOCK_SIZE;
-	size_t i;
 
-	sctx->buf[partial++] = 0x80;
-	if (partial > bit_offset) {
-		memset(&sctx->buf[partial], 0, SHA256_BLOCK_SIZE - partial);
-		sha256_blocks(sctx->state, sctx->buf, 1, force_generic);
-		partial = 0;
-	}
-
-	memset(&sctx->buf[partial], 0, bit_offset - partial);
-	*bits = cpu_to_be64(sctx->count << 3);
-	sha256_blocks(sctx->state, sctx->buf, 1, force_generic);
-
-	for (i = 0; i < digest_size; i += 4)
-		put_unaligned_be32(sctx->state[i / 4], out + i);
-
+	sha256_finup(&sctx->ctx, sctx->buf, partial, out, digest_size,
+		     force_generic || sha256_purgatory(), false);
 	memzero_explicit(sctx, sizeof(*sctx));
 }
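The finalisation logic removed above now lives in the new sha256_finup()
helper, which is defined on the header side of this series and therefore not
visible in this diff.  A sketch of the relocated code, mirroring the removed
lines (the parameter list and the crypto_sha256_state layout are inferred
from the call site, so treat the exact names here as assumptions):

	/* Sketch of the relocated finalisation; the real helper lives in a header. */
	static inline void sha256_finup_sketch(struct crypto_sha256_state *sctx,
					       u8 buf[SHA256_BLOCK_SIZE], size_t len,
					       u8 *out, size_t digest_size,
					       bool force_generic, bool force_simd)
	{
		const size_t bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
		__be64 *bits = (__be64 *)&buf[bit_offset];
		size_t i;

		/* Append the 0x80 terminator; flush a block if the length field no longer fits. */
		buf[len++] = 0x80;
		if (len > bit_offset) {
			memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
			sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
			len = 0;
		}

		/* Zero-pad, append the message length in bits, process the final block. */
		memset(&buf[len], 0, bit_offset - len);
		*bits = cpu_to_be64(sctx->count << 3);
		sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);

		/* Write out the big-endian digest (possibly truncated, e.g. for SHA-224). */
		for (i = 0; i < digest_size; i += 4)
			put_unaligned_be32(sctx->state[i / 4], out + i);
	}

Compared with the old __sha256_final(), the main difference is that the block
function is now selected through sha256_choose_blocks(), so the same helper
can serve the library, purgatory and Crypto API callers.  Moving this code out
of sha256.c is also why the <linux/unaligned.h> include can be dropped there.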