author     Eric Biggers <ebiggers@kernel.org>   2025-06-19 15:55:35 -0700
committer  Eric Biggers <ebiggers@kernel.org>   2025-06-30 09:26:20 -0700
commit     22375adaa0d9fbba9646c8e2b099c6e87c97bfae
tree       242efaa99682042f3e3338483c0bfd3a064eec0f
parent     a6d2f48b00f71590e1e4f174a88bd149516e6440
lib/crypto: mips/chacha: Fix clang build and remove unneeded byteswap
The MIPS32r2 ChaCha code has never been buildable with the clang
assembler.  First, clang doesn't support the 'rotl' pseudo-instruction:

    error: unknown instruction, did you mean: rol, rotr?

Second, clang requires that both operands of the 'wsbh' instruction be
explicitly given:

    error: too few operands for instruction
To fix this, align the code with the real instruction set by (1) using
the real instruction 'rotr' instead of the nonstandard pseudo-
instruction 'rotl', and (2) explicitly giving both operands to 'wsbh'.
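
For reference, the rewrite rests on two identities, sketched below in a
small standalone C program (illustrative only, not part of the patch):
a left-rotate by S equals a right-rotate by 32 - S, and swapping the
bytes within each halfword ('wsbh') followed by a 16-bit rotate is a
full 32-bit byteswap, which is what CPU_TO_LE32 needs on big endian:

    /*
     * Illustrative sketch only, not part of the patch: checks the two
     * identities the rewrite relies on.
     */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned int s)
    {
        return (x << s) | (x >> (32 - s));
    }

    static uint32_t rotr32(uint32_t x, unsigned int s)
    {
        return (x >> s) | (x << (32 - s));
    }

    /* 'wsbh' swaps the two bytes within each 16-bit halfword. */
    static uint32_t wsbh(uint32_t x)
    {
        return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
    }

    int main(void)
    {
        uint32_t x = 0x12345678;

        /* rotl by S == rotr by 32 - S, for 0 < S < 32 */
        for (unsigned int s = 1; s < 32; s++)
            assert(rotl32(x, s) == rotr32(x, 32 - s));

        /* wsbh followed by a 16-bit rotate is a full byteswap */
        assert(rotr32(wsbh(x), 16) == 0x78563412);
        return 0;
    }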
To make removing the use of 'rotl' a bit easier, also remove the
unnecessary special-casing for big endian CPUs at
.Lchacha_mips_xor_bytes. The tail handling is actually
endian-independent since it processes one byte at a time. On big endian
CPUs the old code byte-swapped SAVED_X, then iterated through it in
reverse order. But the byteswap and reverse iteration canceled out.
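
That cancellation can be checked with another short standalone C sketch
(again illustrative only, not part of the patch): the bytes of the
byteswapped word, read from the most significant end down, are exactly
the bytes of the original word read from the least significant end up:

    /*
     * Illustrative sketch only, not part of the patch: a 32-bit
     * byteswap followed by reverse-order byte iteration visits the
     * same bytes as forward-order iteration over the original word.
     */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t bswap32(uint32_t x)
    {
        return (x << 24) | ((x & 0x0000ff00u) << 8) |
               ((x >> 8) & 0x0000ff00u) | (x >> 24);
    }

    /* byte i of x, counting up from the least significant byte */
    static uint8_t byte_of(uint32_t x, unsigned int i)
    {
        return (x >> (8 * i)) & 0xff;
    }

    int main(void)
    {
        uint32_t x = 0x12345678;

        for (unsigned int i = 0; i < 4; i++)
            assert(byte_of(bswap32(x), 3 - i) == byte_of(x, i));
        return 0;
    }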
Tested with chacha20poly1305-selftest in QEMU using "-M malta" with both
little endian and big endian mips32r2 kernels.
Fixes: 49aa7c00eddf ("crypto: mips/chacha - import 32r2 ChaCha code from Zinc")
Cc: stable@vger.kernel.org
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202505080409.EujEBwA0-lkp@intel.com/
Link: https://lore.kernel.org/r/20250619225535.679301-1-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
-rw-r--r--  lib/crypto/mips/chacha-core.S  20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/lib/crypto/mips/chacha-core.S b/lib/crypto/mips/chacha-core.S
index 5755f69cfe00..706aeb850fb0 100644
--- a/lib/crypto/mips/chacha-core.S
+++ b/lib/crypto/mips/chacha-core.S
@@ -55,17 +55,13 @@
 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define MSB 0
 #define LSB 3
-#define ROTx rotl
-#define ROTR(n) rotr n, 24
 #define CPU_TO_LE32(n) \
-	wsbh	n; \
+	wsbh	n, n; \
 	rotr	n, 16;
 #else
 #define MSB 3
 #define LSB 0
-#define ROTx rotr
 #define CPU_TO_LE32(n)
-#define ROTR(n)
 #endif
 
 #define FOR_EACH_WORD(x) \
@@ -192,10 +188,10 @@ CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
 	xor	X(W), X(B); \
 	xor	X(Y), X(C); \
 	xor	X(Z), X(D); \
-	rotl	X(V), S; \
-	rotl	X(W), S; \
-	rotl	X(Y), S; \
-	rotl	X(Z), S;
+	rotr	X(V), 32 - S; \
+	rotr	X(W), 32 - S; \
+	rotr	X(Y), 32 - S; \
+	rotr	X(Z), 32 - S;
 
 .text
 .set	reorder
@@ -372,21 +368,19 @@ chacha_crypt_arch:
 	/* First byte */
 	lbu	T1, 0(IN)
 	addiu	$at, BYTES, 1
-	CPU_TO_LE32(SAVED_X)
-	ROTR(SAVED_X)
 	xor	T1, SAVED_X
 	sb	T1, 0(OUT)
 	beqz	$at, .Lchacha_mips_xor_done
 	/* Second byte */
 	lbu	T1, 1(IN)
 	addiu	$at, BYTES, 2
-	ROTx	SAVED_X, 8
+	rotr	SAVED_X, 8
 	xor	T1, SAVED_X
 	sb	T1, 1(OUT)
 	beqz	$at, .Lchacha_mips_xor_done
 	/* Third byte */
 	lbu	T1, 2(IN)
-	ROTx	SAVED_X, 8
+	rotr	SAVED_X, 8
 	xor	T1, SAVED_X
 	sb	T1, 2(OUT)
 	b	.Lchacha_mips_xor_done