From c5b91a17cc72e37fd830bca008369db12e7b9d2d Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 1 Oct 2025 12:16:44 +0200
Subject: arm64/simd: Add scoped guard API for kernel mode SIMD

Encapsulate kernel_neon_begin() and kernel_neon_end() using a 'ksimd'
cleanup guard. This hides the prototype of those functions, allowing them
to be changed for arm64 but not ARM, without breaking code that is shared
between those architectures (RAID6, AEGIS-128).

It probably makes sense to expose this API more widely across
architectures: it gives the arch code more flexibility in how to plumb it
in, while imposing stricter rules that the start/end bookends must appear
in matched pairs.

Reviewed-by: Kees Cook
Reviewed-by: Mark Brown
Reviewed-by: Eric Biggers
Reviewed-by: Jonathan Cameron
Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
---
 arch/arm64/include/asm/simd.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index 8e86c9e70e48..d9f83c478736 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -6,12 +6,15 @@
 #ifndef __ASM_SIMD_H
 #define __ASM_SIMD_H
 
+#include
 #include
 #include
 #include
 #include
 #include
 
+#include
+
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 /*
@@ -40,4 +43,8 @@ static __must_check inline bool may_use_simd(void) {
 
 #endif /* ! CONFIG_KERNEL_MODE_NEON */
 
+DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())
+
+#define scoped_ksimd()    scoped_guard(ksimd)
+
 #endif
-- cgit

From 814f5415d3e3084eeb0550acdee5eca8b4966055 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 1 Oct 2025 12:20:32 +0200
Subject: ARM/simd: Add scoped guard API for kernel mode SIMD

Implement the ksimd scoped guard API for 32-bit ARM too, so that it can
be used by code that supports both ARM and arm64.

Reviewed-by: Kees Cook
Reviewed-by: Eric Biggers
Reviewed-by: Jonathan Cameron
Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
---
 arch/arm/include/asm/simd.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
index be08a8da046f..8549fa8b7253 100644
--- a/arch/arm/include/asm/simd.h
+++ b/arch/arm/include/asm/simd.h
@@ -2,14 +2,21 @@
 #ifndef _ASM_SIMD_H
 #define _ASM_SIMD_H
 
+#include
 #include
 #include
 #include
 
+#include
+
 static __must_check inline bool may_use_simd(void)
 {
     return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            !in_hardirq() && !irqs_disabled();
 }
 
+DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())
+
+#define scoped_ksimd()    scoped_guard(ksimd)
+
 #endif /* _ASM_SIMD_H */
-- cgit
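[Editor's note: a minimal usage sketch of the guard introduced by the two
patches above. The xor_blocks*() helpers are hypothetical and exist only to
show the shape of a caller shared between ARM and arm64; scoped_ksimd() and
may_use_simd() are the interfaces from <asm/simd.h> added above.]

    #include <linux/types.h>
    #include <asm/simd.h>

    /* hypothetical NEON and scalar implementations */
    void xor_blocks_neon(u8 *dst, const u8 *src, size_t len);
    void xor_blocks_generic(u8 *dst, const u8 *src, size_t len);

    static void xor_blocks(u8 *dst, const u8 *src, size_t len)
    {
        if (!may_use_simd()) {
            /* NEON not usable in this context: fall back to scalar code */
            xor_blocks_generic(dst, src, len);
            return;
        }

        /*
         * The NEON unit may be used for the statement (or block) that
         * follows; the end-of-use call is emitted automatically when the
         * scope is left, so the begin/end bookends always pair up.
         */
        scoped_ksimd()
            xor_blocks_neon(dst, src, len);
    }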
From f53d18a4e67eacf665e9d60727d508387f84327b Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 1 Oct 2025 13:29:23 +0200
Subject: lib/crypto: Switch ARM and arm64 to 'ksimd' scoped guard API

Before modifying the prototypes of kernel_neon_begin() and
kernel_neon_end() to accommodate kernel mode FP/SIMD state buffers
allocated on the stack, move arm64 to the new 'ksimd' scoped guard API,
which encapsulates the calls to those functions.

For symmetry, do the same for 32-bit ARM too.

Reviewed-by: Eric Biggers
Reviewed-by: Jonathan Cameron
Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
---
 lib/crypto/arm/chacha.h     | 11 ++++-------
 lib/crypto/arm/curve25519.h |  5 ++---
 lib/crypto/arm/poly1305.h   |  6 ++----
 lib/crypto/arm/sha1.h       | 13 ++++++-------
 lib/crypto/arm/sha256.h     | 12 ++++++------
 lib/crypto/arm/sha512.h     |  5 ++---
 lib/crypto/arm64/chacha.h   | 11 ++++-------
 lib/crypto/arm64/poly1305.h |  6 ++----
 lib/crypto/arm64/sha1.h     |  7 +++----
 lib/crypto/arm64/sha256.h   | 19 ++++++++-----------
 lib/crypto/arm64/sha512.h   |  8 ++++----
 11 files changed, 43 insertions(+), 60 deletions(-)

diff --git a/lib/crypto/arm/chacha.h b/lib/crypto/arm/chacha.h
index 0cae30f8ee5d..836e49088e98 100644
--- a/lib/crypto/arm/chacha.h
+++ b/lib/crypto/arm/chacha.h
@@ -12,7 +12,6 @@
 
 #include
 #include
-#include
 #include
 
 asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
@@ -68,9 +67,8 @@ static void hchacha_block_arch(const struct chacha_state *state,
     if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
         hchacha_block_arm(state, out, nrounds);
     } else {
-        kernel_neon_begin();
-        hchacha_block_neon(state, out, nrounds);
-        kernel_neon_end();
+        scoped_ksimd()
+            hchacha_block_neon(state, out, nrounds);
     }
 }
 
@@ -87,9 +85,8 @@ static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
     do {
         unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
 
-        kernel_neon_begin();
-        chacha_doneon(state, dst, src, todo, nrounds);
-        kernel_neon_end();
+        scoped_ksimd()
+            chacha_doneon(state, dst, src, todo, nrounds);
 
         bytes -= todo;
         src += todo;
diff --git a/lib/crypto/arm/curve25519.h b/lib/crypto/arm/curve25519.h
index f6d66494eb8f..b1a566885e95 100644
--- a/lib/crypto/arm/curve25519.h
+++ b/lib/crypto/arm/curve25519.h
@@ -25,9 +25,8 @@ static void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
                 const u8 point[CURVE25519_KEY_SIZE])
 {
     if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
-        kernel_neon_begin();
-        curve25519_neon(out, scalar, point);
-        kernel_neon_end();
+        scoped_ksimd()
+            curve25519_neon(out, scalar, point);
     } else {
         curve25519_generic(out, scalar, point);
     }
diff --git a/lib/crypto/arm/poly1305.h b/lib/crypto/arm/poly1305.h
index 0021cf368307..0fe903d8de55 100644
--- a/lib/crypto/arm/poly1305.h
+++ b/lib/crypto/arm/poly1305.h
@@ -6,7 +6,6 @@
 
 #include
-#include
 #include
 #include
 #include
@@ -32,9 +31,8 @@ static void poly1305_blocks(struct poly1305_block_state *state, const u8 *src,
     do {
         unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
-        kernel_neon_begin();
-        poly1305_blocks_neon(state, src, todo, padbit);
-        kernel_neon_end();
+        scoped_ksimd()
+            poly1305_blocks_neon(state, src, todo, padbit);
 
         len -= todo;
         src += todo;
diff --git a/lib/crypto/arm/sha1.h b/lib/crypto/arm/sha1.h
index 29f8bcad0447..3e2d8c7cab9f 100644
--- a/lib/crypto/arm/sha1.h
+++ b/lib/crypto/arm/sha1.h
@@ -4,7 +4,6 @@
  *
  * Copyright 2025 Google LLC
  */
-#include
 #include
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@@ -22,12 +21,12 @@ static void sha1_blocks(struct sha1_block_state *state,
 {
     if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
         static_branch_likely(&have_neon) && likely(may_use_simd())) {
-        kernel_neon_begin();
-        if (static_branch_likely(&have_ce))
-            sha1_ce_transform(state, data, nblocks);
-        else
-            sha1_transform_neon(state, data, nblocks);
-        kernel_neon_end();
+        scoped_ksimd() {
+            if (static_branch_likely(&have_ce))
+                sha1_ce_transform(state, data, nblocks);
+            else
+                sha1_transform_neon(state, data, nblocks);
+        }
     } else {
         sha1_block_data_order(state, data, nblocks);
     }
diff --git a/lib/crypto/arm/sha256.h b/lib/crypto/arm/sha256.h
index 7556457b3094..ae7e52dd6e3b 100644
--- a/lib/crypto/arm/sha256.h
+++ b/lib/crypto/arm/sha256.h
@@ -22,12 +22,12 @@ static void sha256_blocks(struct sha256_block_state *state,
 {
     if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
         static_branch_likely(&have_neon) && likely(may_use_simd())) {
-        kernel_neon_begin();
-        if (static_branch_likely(&have_ce))
-            sha256_ce_transform(state, data, nblocks);
-        else
-            sha256_block_data_order_neon(state, data, nblocks);
-        kernel_neon_end();
+        scoped_ksimd() {
+            if (static_branch_likely(&have_ce))
+                sha256_ce_transform(state, data, nblocks);
+            else
+                sha256_block_data_order_neon(state, data, nblocks);
+        }
     } else {
         sha256_block_data_order(state, data, nblocks);
     }
diff --git a/lib/crypto/arm/sha512.h b/lib/crypto/arm/sha512.h
index d1b485dd275d..ed9bd81d6d78 100644
--- a/lib/crypto/arm/sha512.h
+++ b/lib/crypto/arm/sha512.h
@@ -19,9 +19,8 @@ static void sha512_blocks(struct sha512_block_state *state,
 {
     if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
         static_branch_likely(&have_neon) && likely(may_use_simd())) {
-        kernel_neon_begin();
-        sha512_block_data_order_neon(state, data, nblocks);
-        kernel_neon_end();
+        scoped_ksimd()
+            sha512_block_data_order_neon(state, data, nblocks);
     } else {
         sha512_block_data_order(state, data, nblocks);
     }
diff --git a/lib/crypto/arm64/chacha.h b/lib/crypto/arm64/chacha.h
index ba6c22d46086..ca8c6a8b0578 100644
--- a/lib/crypto/arm64/chacha.h
+++ b/lib/crypto/arm64/chacha.h
@@ -23,7 +23,6 @@
 
 #include
 #include
-#include
 #include
 
 asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
@@ -65,9 +64,8 @@ static void hchacha_block_arch(const struct chacha_state *state,
     if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
         hchacha_block_generic(state, out, nrounds);
     } else {
-        kernel_neon_begin();
-        hchacha_block_neon(state, out, nrounds);
-        kernel_neon_end();
+        scoped_ksimd()
+            hchacha_block_neon(state, out, nrounds);
     }
 }
 
@@ -81,9 +79,8 @@ static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
     do {
         unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
 
-        kernel_neon_begin();
-        chacha_doneon(state, dst, src, todo, nrounds);
-        kernel_neon_end();
+        scoped_ksimd()
+            chacha_doneon(state, dst, src, todo, nrounds);
 
         bytes -= todo;
         src += todo;
diff --git a/lib/crypto/arm64/poly1305.h b/lib/crypto/arm64/poly1305.h
index aed5921ccd9a..b77669767cd6 100644
--- a/lib/crypto/arm64/poly1305.h
+++ b/lib/crypto/arm64/poly1305.h
@@ -6,7 +6,6 @@
 
 #include
-#include
 #include
 #include
 #include
@@ -31,9 +30,8 @@ static void poly1305_blocks(struct poly1305_block_state *state, const u8 *src,
     do {
         unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
-        kernel_neon_begin();
-        poly1305_blocks_neon(state, src, todo, padbit);
-        kernel_neon_end();
+        scoped_ksimd()
+            poly1305_blocks_neon(state, src, todo, padbit);
 
         len -= todo;
         src += todo;
diff --git a/lib/crypto/arm64/sha1.h b/lib/crypto/arm64/sha1.h
index aaef4ebfc5e3..bc7071f1be09 100644
--- a/lib/crypto/arm64/sha1.h
+++ b/lib/crypto/arm64/sha1.h
@@ -4,7 +4,6 @@
  *
  * Copyright 2025 Google LLC
  */
-#include
 #include
 #include
 
@@ -20,9 +19,9 @@ static void sha1_blocks(struct sha1_block_state *state,
     do {
         size_t rem;
 
-        kernel_neon_begin();
-        rem = __sha1_ce_transform(state, data, nblocks);
-        kernel_neon_end();
+        scoped_ksimd()
+            rem = __sha1_ce_transform(state, data, nblocks);
+
         data += (nblocks - rem) * SHA1_BLOCK_SIZE;
         nblocks = rem;
     } while (nblocks);
diff --git a/lib/crypto/arm64/sha256.h b/lib/crypto/arm64/sha256.h
index 80d06df27d3a..568dff0f276a 100644
--- a/lib/crypto/arm64/sha256.h
+++ b/lib/crypto/arm64/sha256.h
@@ -4,7 +4,6 @@
  *
  * Copyright 2025 Google LLC
  */
-#include
 #include
 #include
 
@@ -27,17 +26,16 @@ static void sha256_blocks(struct sha256_block_state *state,
             do {
                 size_t rem;
 
-                kernel_neon_begin();
-                rem = __sha256_ce_transform(state,
-                                            data, nblocks);
-                kernel_neon_end();
+                scoped_ksimd()
+                    rem = __sha256_ce_transform(state, data,
+                                                nblocks);
+
                 data += (nblocks - rem) * SHA256_BLOCK_SIZE;
                 nblocks = rem;
             } while (nblocks);
         } else {
-            kernel_neon_begin();
-            sha256_block_neon(state, data, nblocks);
-            kernel_neon_end();
+            scoped_ksimd()
+                sha256_block_neon(state, data, nblocks);
         }
     } else {
         sha256_block_data_order(state, data, nblocks);
@@ -66,9 +64,8 @@ static bool sha256_finup_2x_arch(const struct __sha256_ctx *ctx,
     if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
         static_branch_likely(&have_ce) &&
         len >= SHA256_BLOCK_SIZE && len <= 65536 && likely(may_use_simd())) {
-        kernel_neon_begin();
-        sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
-        kernel_neon_end();
+        scoped_ksimd()
+            sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
         kmsan_unpoison_memory(out1, SHA256_DIGEST_SIZE);
         kmsan_unpoison_memory(out2, SHA256_DIGEST_SIZE);
         return true;
diff --git a/lib/crypto/arm64/sha512.h b/lib/crypto/arm64/sha512.h
index ddb0d256f73a..7eb7ef04d268 100644
--- a/lib/crypto/arm64/sha512.h
+++ b/lib/crypto/arm64/sha512.h
@@ -4,7 +4,7 @@
  *
  * Copyright 2025 Google LLC
  */
-#include
+
 #include
 #include
 
@@ -24,9 +24,9 @@ static void sha512_blocks(struct sha512_block_state *state,
     do {
         size_t rem;
 
-        kernel_neon_begin();
-        rem = __sha512_ce_transform(state, data, nblocks);
-        kernel_neon_end();
+        scoped_ksimd()
+            rem = __sha512_ce_transform(state, data, nblocks);
+
         data += (nblocks - rem) * SHA512_BLOCK_SIZE;
         nblocks = rem;
     } while (nblocks);
-- cgit
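[Editor's note: a simplified sketch of why these conversions are
behavior-preserving. DEFINE_LOCK_GUARD_0(ksimd, ...) from <linux/cleanup.h>
defines a guard whose constructor runs kernel_neon_begin() and whose cleanup
runs kernel_neon_end(), and scoped_guard() instantiates that guard around the
statement or block that follows. Conceptually (not the exact macro expansion):

    scoped_ksimd()
        rem = __sha512_ce_transform(state, data, nblocks);

    /* behaves like: */

    kernel_neon_begin();
    rem = __sha512_ce_transform(state, data, nblocks);
    kernel_neon_end();

The difference is that kernel_neon_end() is now invoked automatically when the
scope is exited, so the bookends always appear in matched pairs, and their
prototypes can later change (e.g. to take an on-stack FP/SIMD state buffer, as
the third commit message anticipates) without touching the callers converted
above.]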