Diffstat (limited to 'lib')
-rw-r--r--   lib/crypto/Kconfig                        9
-rw-r--r--   lib/crypto/Makefile                       1
-rw-r--r--   lib/crypto/blake2b.c                     44
-rw-r--r--   lib/crypto/blake2s.c                     38
-rw-r--r--   lib/crypto/riscv/chacha-riscv64-zvkb.S    5

5 files changed, 44 insertions, 53 deletions
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index a3647352bff6..6871a41e5069 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -61,7 +61,8 @@ config CRYPTO_LIB_CHACHA_ARCH
default y if ARM64 && KERNEL_MODE_NEON
default y if MIPS && CPU_MIPS32_R2
default y if PPC64 && CPU_LITTLE_ENDIAN && VSX
- default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
default y if S390
default y if X86_64
@@ -184,7 +185,8 @@ config CRYPTO_LIB_SHA256_ARCH
default y if ARM64
default y if MIPS && CPU_CAVIUM_OCTEON
default y if PPC && SPE
- default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
default y if S390
default y if SPARC64
default y if X86_64
@@ -202,7 +204,8 @@ config CRYPTO_LIB_SHA512_ARCH
default y if ARM && !CPU_V7M
default y if ARM64
default y if MIPS && CPU_CAVIUM_OCTEON
- default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
default y if S390
default y if SPARC64
default y if X86_64
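
A note on the RISC-V condition used in all three hunks above: the arch implementations are now enabled only when the toolchain supports the vector-crypto extensions and the kernel is also configured for efficient misaligned vector accesses, presumably because the vector assembly loads words from byte-aligned input buffers. Once set, such an option is visible to C code through IS_ENABLED() from <linux/kconfig.h>; a minimal sketch (the helper name below is invented for illustration and is not part of this patch):

#include <linux/kconfig.h>
#include <linux/types.h>

/* Hypothetical helper, not from this patch: true when this kernel was
 * built with the stricter RISC-V vector-crypto gate satisfied. */
static inline bool riscv_vector_crypto_configured(void)
{
	return IS_ENABLED(CONFIG_TOOLCHAIN_HAS_VECTOR_CRYPTO) &&
	       IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS);
}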
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index b5346cebbb55..330ab65b29c4 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_LIB_BLAKE2B) += libblake2b.o
libblake2b-y := blake2b.o
-CFLAGS_blake2b.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2B_ARCH),y)
CFLAGS_blake2b.o += -I$(src)/$(SRCARCH)
libblake2b-$(CONFIG_ARM) += arm/blake2b-neon-core.o
diff --git a/lib/crypto/blake2b.c b/lib/crypto/blake2b.c
index 09c6d65d8a6e..581b7f8486fa 100644
--- a/lib/crypto/blake2b.c
+++ b/lib/crypto/blake2b.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/unroll.h>
#include <linux/types.h>
static const u8 blake2b_sigma[12][16] = {
@@ -73,31 +74,26 @@ blake2b_compress_generic(struct blake2b_ctx *ctx,
b = ror64(b ^ c, 63); \
} while (0)
-#define ROUND(r) do { \
- G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
- G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
- G(r, 2, v[2], v[ 6], v[10], v[14]); \
- G(r, 3, v[3], v[ 7], v[11], v[15]); \
- G(r, 4, v[0], v[ 5], v[10], v[15]); \
- G(r, 5, v[1], v[ 6], v[11], v[12]); \
- G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
- G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
- ROUND(0);
- ROUND(1);
- ROUND(2);
- ROUND(3);
- ROUND(4);
- ROUND(5);
- ROUND(6);
- ROUND(7);
- ROUND(8);
- ROUND(9);
- ROUND(10);
- ROUND(11);
-
+#ifdef CONFIG_64BIT
+ /*
+  * Unroll the rounds loop so the compiler can constant-fold the
+  * blake2b_sigma lookups.  This is worthwhile on 64-bit kernels,
+  * but not on 32-bit ones, where the code size is already large
+  * because BLAKE2b operates on 64-bit words.
+  */
+ unrolled_full
+#endif
+ for (int r = 0; r < 12; r++) {
+ G(r, 0, v[0], v[4], v[8], v[12]);
+ G(r, 1, v[1], v[5], v[9], v[13]);
+ G(r, 2, v[2], v[6], v[10], v[14]);
+ G(r, 3, v[3], v[7], v[11], v[15]);
+ G(r, 4, v[0], v[5], v[10], v[15]);
+ G(r, 5, v[1], v[6], v[11], v[12]);
+ G(r, 6, v[2], v[7], v[8], v[13]);
+ G(r, 7, v[3], v[4], v[9], v[14]);
+ }
#undef G
-#undef ROUND
for (i = 0; i < 8; ++i)
ctx->h[i] ^= v[i] ^ v[i + 8];
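
For context on the constant-folding the new comment mentions: once the loop is fully unrolled, r is a compile-time constant in every G() expansion, so each blake2b_sigma[r][...] lookup collapses to a fixed message-word index. A standalone user-space sketch (not the kernel code) of round 0, slot 0, where blake2b_sigma[0] is simply the identity permutation {0, 1, ..., 15}:

#include <stdint.h>

/* Rotate right; assumes 0 < n < 64, which holds for all BLAKE2b rotations. */
static inline uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* G(0, 0, v[0], v[4], v[8], v[12]) with the sigma lookups folded:
 * blake2b_sigma[0][0] == 0 and blake2b_sigma[0][1] == 1, so the two
 * message words become the fixed operands m[0] and m[1]. */
static void g_round0_slot0(uint64_t v[16], const uint64_t m[16])
{
	v[0] += v[4] + m[0];
	v[12] = ror64(v[12] ^ v[0], 32);
	v[8] += v[12];
	v[4] = ror64(v[4] ^ v[8], 24);
	v[0] += v[4] + m[1];
	v[12] = ror64(v[12] ^ v[0], 16);
	v[8] += v[12];
	v[4] = ror64(v[4] ^ v[8], 63);
}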
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 6182c21ed943..71578a084742 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/unroll.h>
#include <linux/types.h>
static const u8 blake2s_sigma[10][16] = {
@@ -71,29 +72,22 @@ blake2s_compress_generic(struct blake2s_ctx *ctx,
b = ror32(b ^ c, 7); \
} while (0)
-#define ROUND(r) do { \
- G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
- G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
- G(r, 2, v[2], v[ 6], v[10], v[14]); \
- G(r, 3, v[3], v[ 7], v[11], v[15]); \
- G(r, 4, v[0], v[ 5], v[10], v[15]); \
- G(r, 5, v[1], v[ 6], v[11], v[12]); \
- G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
- G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
- ROUND(0);
- ROUND(1);
- ROUND(2);
- ROUND(3);
- ROUND(4);
- ROUND(5);
- ROUND(6);
- ROUND(7);
- ROUND(8);
- ROUND(9);
-
+ /*
+  * Unroll the rounds loop so the compiler can constant-fold the
+  * blake2s_sigma lookups.
+  */
+ unrolled_full
+ for (int r = 0; r < 10; r++) {
+ G(r, 0, v[0], v[4], v[8], v[12]);
+ G(r, 1, v[1], v[5], v[9], v[13]);
+ G(r, 2, v[2], v[6], v[10], v[14]);
+ G(r, 3, v[3], v[7], v[11], v[15]);
+ G(r, 4, v[0], v[5], v[10], v[15]);
+ G(r, 5, v[1], v[6], v[11], v[12]);
+ G(r, 6, v[2], v[7], v[8], v[13]);
+ G(r, 7, v[3], v[4], v[9], v[14]);
+ }
#undef G
-#undef ROUND
for (i = 0; i < 8; ++i)
ctx->h[i] ^= v[i] ^ v[i + 8];
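
The unrolled_full annotation used in both files comes from <linux/unroll.h> and expands to a compiler loop-unrolling pragma placed immediately before the loop it applies to. A rough standalone equivalent (the UNROLL_FULL name is invented here; the two pragmas themselves are real GCC and Clang features):

#include <stdio.h>

#if defined(__clang__)
#define UNROLL_FULL _Pragma("clang loop unroll(full)")
#elif defined(__GNUC__)
#define UNROLL_FULL _Pragma("GCC unroll 10")	/* GCC wants an explicit count */
#else
#define UNROLL_FULL				/* no-op elsewhere */
#endif

int main(void)
{
	unsigned int sum = 0;

	UNROLL_FULL	/* applies to the loop on the next line */
	for (int r = 0; r < 10; r++)
		sum += r * r;	/* each iteration becomes straight-line code */
	printf("%u\n", sum);
	return 0;
}

With a fixed trip count like the 10 BLAKE2s rounds (or 12 for BLAKE2b), full unrolling turns the loop body into straight-line code, which is what lets the sigma indices fold into constants.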
diff --git a/lib/crypto/riscv/chacha-riscv64-zvkb.S b/lib/crypto/riscv/chacha-riscv64-zvkb.S
index b777d0b4e379..3d183ec818f5 100644
--- a/lib/crypto/riscv/chacha-riscv64-zvkb.S
+++ b/lib/crypto/riscv/chacha-riscv64-zvkb.S
@@ -60,7 +60,8 @@
#define VL t2
#define STRIDE t3
#define ROUND_CTR t4
-#define KEY0 s0
+// Avoid s0, which doubles as the frame pointer, so unwinding keeps working.
+#define KEY0 t5
#define KEY1 s1
#define KEY2 s2
#define KEY3 s3
@@ -143,7 +144,6 @@
// The updated 32-bit counter is written back to state->x[12] before returning.
SYM_FUNC_START(chacha_zvkb)
addi sp, sp, -96
- sd s0, 0(sp)
sd s1, 8(sp)
sd s2, 16(sp)
sd s3, 24(sp)
@@ -280,7 +280,6 @@ SYM_FUNC_START(chacha_zvkb)
bnez NBLOCKS, .Lblock_loop
sw COUNTER, 48(STATEP)
- ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
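
Background on the s0 change: in the RISC-V calling convention s0 doubles as the frame pointer, and with CONFIG_FRAME_POINTER the unwinder walks the chain of (fp, ra) records that function prologues save on the stack. Even though s0 was saved and restored here, repurposing it as KEY0 would leave it holding key material while the function runs, so a backtrace taken from an interrupt or profiler sample landing in this code would follow garbage. A hedged user-space sketch of such a frame walk (layout assumes the standard prologue, which stores the return address at fp - 8 and the caller's fp at fp - 16; a real unwinder also bounds-checks fp against the stack):

#include <stdint.h>

/* Advance one frame: reads the (fp, ra) record that the standard
 * RISC-V prologue leaves just below the address held in s0/fp.
 * Returns 0 at the end of the chain. */
static int unwind_frame(uintptr_t *fp, uintptr_t *ra)
{
	uintptr_t cur = *fp;

	if (cur == 0 || (cur & 0x7))	/* end of chain or bogus pointer */
		return 0;
	*ra = *(const uintptr_t *)(cur - 8);	/* saved return address */
	*fp = *(const uintptr_t *)(cur - 16);	/* caller's frame pointer */
	return 1;
}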