Diffstat (limited to 'arch/arm/crypto')
-rw-r--r--  arch/arm/crypto/Kconfig             |  41 -
-rw-r--r--  arch/arm/crypto/Makefile            |  21 -
-rw-r--r--  arch/arm/crypto/sha1-armv4-large.S  | 507 -
-rw-r--r--  arch/arm/crypto/sha1-armv7-neon.S   | 634 -
-rw-r--r--  arch/arm/crypto/sha1-ce-core.S      | 123 -
-rw-r--r--  arch/arm/crypto/sha1-ce-glue.c      |  72 -
-rw-r--r--  arch/arm/crypto/sha1_glue.c         |  75 -
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c    |  83 -
-rw-r--r--  arch/arm/crypto/sha512-armv4.pl     | 657 -
-rw-r--r--  arch/arm/crypto/sha512-glue.c       | 110 -
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c  |  75 -
-rw-r--r--  arch/arm/crypto/sha512.h            |   3 -
12 files changed, 0 insertions(+), 2401 deletions(-)
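All of the removed files implement or wire up the same primitive: SHA-1's 80-round compression function (the sha512-* files do the analogous 64-bit SHA-512 transform). As a reader's aid only — this is a minimal portable C sketch, not the kernel code being deleted, and the function names are illustrative — the transform the assembly below computes is:

#include <stdint.h>

/* Rotate left; only called with n in 1..31, so no undefined behaviour. */
static uint32_t rol32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

/* One SHA-1 compression step: fold a 64-byte block into the 5-word state.
 * The four round constants are the same .LK_00_19/.LK_20_39/.LK_40_59/
 * .LK_60_79 words that appear in sha1-armv4-large.S below. */
static void sha1_block(uint32_t state[5], const uint8_t data[64])
{
	uint32_t w[80], a, b, c, d, e, f, k, t;
	int i;

	for (i = 0; i < 16; i++)	/* big-endian message load */
		w[i] = (uint32_t)data[4 * i] << 24 |
		       (uint32_t)data[4 * i + 1] << 16 |
		       (uint32_t)data[4 * i + 2] << 8 | data[4 * i + 3];
	for (i = 16; i < 80; i++)	/* message schedule expansion */
		w[i] = rol32(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);

	a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4];

	for (i = 0; i < 80; i++) {
		if (i < 20)      { f = (b & c) | (~b & d);          k = 0x5a827999; }
		else if (i < 40) { f = b ^ c ^ d;                   k = 0x6ed9eba1; }
		else if (i < 60) { f = (b & c) | (b & d) | (c & d); k = 0x8f1bbcdc; }
		else             { f = b ^ c ^ d;                   k = 0xca62c1d6; }
		t = rol32(a, 5) + f + e + k + w[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e;
}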
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 7efb9a8596e4..1e5f3cdf691c 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -62,47 +62,6 @@ config CRYPTO_BLAKE2B_NEON much faster than the SHA-2 family and slightly faster than SHA-1. -config CRYPTO_SHA1_ARM - tristate "Hash functions: SHA-1" - select CRYPTO_SHA1 - select CRYPTO_HASH - help - SHA-1 secure hash algorithm (FIPS 180) - - Architecture: arm - -config CRYPTO_SHA1_ARM_NEON - tristate "Hash functions: SHA-1 (NEON)" - depends on KERNEL_MODE_NEON - select CRYPTO_SHA1_ARM - select CRYPTO_SHA1 - select CRYPTO_HASH - help - SHA-1 secure hash algorithm (FIPS 180) - - Architecture: arm using - - NEON (Advanced SIMD) extensions - -config CRYPTO_SHA1_ARM_CE - tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON - select CRYPTO_SHA1_ARM - select CRYPTO_HASH - help - SHA-1 secure hash algorithm (FIPS 180) - - Architecture: arm using ARMv8 Crypto Extensions - -config CRYPTO_SHA512_ARM - tristate "Hash functions: SHA-384 and SHA-512 (NEON)" - select CRYPTO_HASH - depends on !CPU_V7M - help - SHA-384 and SHA-512 secure hash algorithms (FIPS 180) - - Architecture: arm using - - NEON (Advanced SIMD) extensions - config CRYPTO_AES_ARM tristate "Ciphers: AES" select CRYPTO_ALGAPI diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 8479137c6e80..4f23999ae17d 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -5,38 +5,17 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o -obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o -obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o -obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o -obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o aes-arm-y := aes-cipher-core.o aes-cipher-glue.o aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o -sha1-arm-y := sha1-armv4-large.o sha1_glue.o -sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o -sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o -sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y) blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o -sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o curve25519-neon-y := curve25519-core.o curve25519-glue.o - -quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $(<) > $(@) - -$(obj)/%-core.S: $(src)/%-armv4.pl - $(call cmd,perl) - -clean-files += sha512-core.S - -aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1 - -AFLAGS_sha512-core.o += $(aflags-thumb2-y) diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S deleted file mode 100644 index 1c8b685149f2..000000000000 --- a/arch/arm/crypto/sha1-armv4-large.S +++ /dev/null @@ -1,507 +0,0 @@ -#define __ARM_ARCH__ __LINUX_ARM_ARCH__ -@ SPDX-License-Identifier: GPL-2.0 - -@ This code is taken from the OpenSSL project but the author (Andy Polyakov) -@ has relicensed it under the GPLv2. 
Therefore this program is free software; -@ you can redistribute it and/or modify it under the terms of the GNU General -@ Public License version 2 as published by the Free Software Foundation. -@ -@ The original headers, including the original license headers, are -@ included below for completeness. - -@ ==================================================================== -@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see https://www.openssl.org/~appro/cryptogams/. -@ ==================================================================== - -@ sha1_block procedure for ARMv4. -@ -@ January 2007. - -@ Size/performance trade-off -@ ==================================================================== -@ impl size in bytes comp cycles[*] measured performance -@ ==================================================================== -@ thumb 304 3212 4420 -@ armv4-small 392/+29% 1958/+64% 2250/+96% -@ armv4-compact 740/+89% 1552/+26% 1840/+22% -@ armv4-large 1420/+92% 1307/+19% 1370/+34%[***] -@ full unroll ~5100/+260% ~1260/+4% ~1300/+5% -@ ==================================================================== -@ thumb = same as 'small' but in Thumb instructions[**] and -@ with recurring code in two private functions; -@ small = detached Xload/update, loops are folded; -@ compact = detached Xload/update, 5x unroll; -@ large = interleaved Xload/update, 5x unroll; -@ full unroll = interleaved Xload/update, full unroll, estimated[!]; -@ -@ [*] Manually counted instructions in "grand" loop body. Measured -@ performance is affected by prologue and epilogue overhead, -@ i-cache availability, branch penalties, etc. -@ [**] While each Thumb instruction is twice smaller, they are not as -@ diverse as ARM ones: e.g., there are only two arithmetic -@ instructions with 3 arguments, no [fixed] rotate, addressing -@ modes are limited. As result it takes more instructions to do -@ the same job in Thumb, therefore the code is never twice as -@ small and always slower. -@ [***] which is also ~35% better than compiler generated code. Dual- -@ issue Cortex A8 core was measured to process input block in -@ ~990 cycles. - -@ August 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 13% improvement on -@ Cortex A8 core and in absolute terms ~870 cycles per input block -@ [or 13.6 cycles per byte]. - -@ February 2011. -@ -@ Profiler-assisted and platform-specific optimization resulted in 10% -@ improvement on Cortex A8 core and 12.2 cycles per byte. - -#include <linux/linkage.h> - -.text - -.align 2 -ENTRY(sha1_block_data_order) - stmdb sp!,{r4-r12,lr} - add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 - ldmia r0,{r3,r4,r5,r6,r7} -.Lloop: - ldr r8,.LK_00_19 - mov r14,sp - sub sp,sp,#15*4 - mov r5,r5,ror#30 - mov r6,r6,ror#30 - mov r7,r7,ror#30 @ [6] -.L_00_15: -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r7,r8,r7,ror#2 @ E+=K_00_19 - eor r10,r5,r6 @ F_xx_xx - add r7,r7,r3,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r4,r10,ror#2 - add r7,r7,r9 @ E+=X[i] - eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! 
- add r7,r7,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r6,r8,r6,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r4,r5 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r6,r8,r6,ror#2 @ E+=K_00_19 - eor r10,r4,r5 @ F_xx_xx - add r6,r6,r7,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r3,r10,ror#2 - add r6,r6,r9 @ E+=X[i] - eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r6,r6,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r5,r8,r5,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r3,r4 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r5,r8,r5,ror#2 @ E+=K_00_19 - eor r10,r3,r4 @ F_xx_xx - add r5,r5,r6,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r7,r10,ror#2 - add r5,r5,r9 @ E+=X[i] - eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r5,r5,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r4,r8,r4,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r7,r3 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r4,r8,r4,ror#2 @ E+=K_00_19 - eor r10,r7,r3 @ F_xx_xx - add r4,r4,r5,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r6,r10,ror#2 - add r4,r4,r9 @ E+=X[i] - eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r4,r4,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r3,r8,r3,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r6,r7 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r3,r8,r3,ror#2 @ E+=K_00_19 - eor r10,r6,r7 @ F_xx_xx - add r3,r3,r4,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r5,r10,ror#2 - add r3,r3,r9 @ E+=X[i] - eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r3,r3,r10 @ E+=F_00_19(B,C,D) - cmp r14,sp - bne .L_00_15 @ [((11+4)*5+2)*3] - sub sp,sp,#25*4 -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r7,r8,r7,ror#2 @ E+=K_00_19 - eor r10,r5,r6 @ F_xx_xx - add r7,r7,r3,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r4,r10,ror#2 - add r7,r7,r9 @ E+=X[i] - eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r7,r7,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- and r10,r3,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) - add r6,r6,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r7,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) - add r5,r5,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r6,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) - add r4,r4,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r5,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) - add r3,r3,r10 @ E+=F_00_19(B,C,D) - - ldr r8,.LK_20_39 @ [+15+16*4] - cmn sp,#0 @ [+3], clear carry to denote 20_39 -.L_20_39_or_60_79: - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r7,r8,r7,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r5,r6 @ F_xx_xx - mov r9,r9,ror#31 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r4,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r7,r7,r9 @ E+=X[i] - add r7,r7,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r3,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - add r6,r6,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r7,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - add r5,r5,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r6,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - add r4,r4,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- eor r10,r5,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - add r3,r3,r10 @ E+=F_20_39(B,C,D) - ARM( teq r14,sp ) @ preserve carry - THUMB( mov r11,sp ) - THUMB( teq r14,r11 ) @ preserve carry - bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] - bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes - - ldr r8,.LK_40_59 - sub sp,sp,#20*4 @ [+2] -.L_40_59: - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r7,r8,r7,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r5,r6 @ F_xx_xx - mov r9,r9,ror#31 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r4,r10,ror#2 @ F_xx_xx - and r11,r5,r6 @ F_xx_xx - add r7,r7,r9 @ E+=X[i] - add r7,r7,r10 @ E+=F_40_59(B,C,D) - add r7,r7,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r3,r10,ror#2 @ F_xx_xx - and r11,r4,r5 @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - add r6,r6,r10 @ E+=F_40_59(B,C,D) - add r6,r6,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r7,r10,ror#2 @ F_xx_xx - and r11,r3,r4 @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - add r5,r5,r10 @ E+=F_40_59(B,C,D) - add r5,r5,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r6,r10,ror#2 @ F_xx_xx - and r11,r7,r3 @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - add r4,r4,r10 @ E+=F_40_59(B,C,D) - add r4,r4,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- and r10,r5,r10,ror#2 @ F_xx_xx - and r11,r6,r7 @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - add r3,r3,r10 @ E+=F_40_59(B,C,D) - add r3,r3,r11,ror#2 - cmp r14,sp - bne .L_40_59 @ [+((12+5)*5+2)*4] - - ldr r8,.LK_60_79 - sub sp,sp,#20*4 - cmp sp,#0 @ set carry to denote 60_79 - b .L_20_39_or_60_79 @ [+4], spare 300 bytes -.L_done: - add sp,sp,#80*4 @ "deallocate" stack frame - ldmia r0,{r8,r9,r10,r11,r12} - add r3,r8,r3 - add r4,r9,r4 - add r5,r10,r5,ror#2 - add r6,r11,r6,ror#2 - add r7,r12,r7,ror#2 - stmia r0,{r3,r4,r5,r6,r7} - teq r1,r2 - bne .Lloop @ [+18], total 1307 - - ldmia sp!,{r4-r12,pc} -.align 2 -.LK_00_19: .word 0x5a827999 -.LK_20_39: .word 0x6ed9eba1 -.LK_40_59: .word 0x8f1bbcdc -.LK_60_79: .word 0xca62c1d6 -ENDPROC(sha1_block_data_order) -.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>" -.align 2 diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S deleted file mode 100644 index 28d816a6a530..000000000000 --- a/arch/arm/crypto/sha1-armv7-neon.S +++ /dev/null @@ -1,634 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function - * - * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> - */ - -#include <linux/linkage.h> -#include <asm/assembler.h> - -.syntax unified -.fpu neon - -.text - - -/* Context structure */ - -#define state_h0 0 -#define state_h1 4 -#define state_h2 8 -#define state_h3 12 -#define state_h4 16 - - -/* Constants */ - -#define K1 0x5A827999 -#define K2 0x6ED9EBA1 -#define K3 0x8F1BBCDC -#define K4 0xCA62C1D6 -.align 4 -.LK_VEC: -.LK1: .long K1, K1, K1, K1 -.LK2: .long K2, K2, K2, K2 -.LK3: .long K3, K3, K3, K3 -.LK4: .long K4, K4, K4, K4 - - -/* Register macros */ - -#define RSTATE r0 -#define RDATA r1 -#define RNBLKS r2 -#define ROLDSTACK r3 -#define RWK lr - -#define _a r4 -#define _b r5 -#define _c r6 -#define _d r7 -#define _e r8 - -#define RT0 r9 -#define RT1 r10 -#define RT2 r11 -#define RT3 r12 - -#define W0 q0 -#define W1 q7 -#define W2 q2 -#define W3 q3 -#define W4 q4 -#define W5 q6 -#define W6 q5 -#define W7 q1 - -#define tmp0 q8 -#define tmp1 q9 -#define tmp2 q10 -#define tmp3 q11 - -#define qK1 q12 -#define qK2 q13 -#define qK3 q14 -#define qK4 q15 - -#ifdef CONFIG_CPU_BIG_ENDIAN -#define ARM_LE(code...) -#else -#define ARM_LE(code...) code -#endif - -/* Round function macros. 
*/ - -#define WK_offs(i) (((i) & 15) * 4) - -#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ldr RT3, [sp, WK_offs(i)]; \ - pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - bic RT0, d, b; \ - add e, e, a, ror #(32 - 5); \ - and RT1, c, b; \ - pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - add RT0, RT0, RT3; \ - add e, e, RT1; \ - ror b, #(32 - 30); \ - pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - add e, e, RT0; - -#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ldr RT3, [sp, WK_offs(i)]; \ - pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - eor RT0, d, b; \ - add e, e, a, ror #(32 - 5); \ - eor RT0, RT0, c; \ - pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - add e, e, RT3; \ - ror b, #(32 - 30); \ - pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - add e, e, RT0; \ - -#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ldr RT3, [sp, WK_offs(i)]; \ - pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - eor RT0, b, c; \ - and RT1, b, c; \ - add e, e, a, ror #(32 - 5); \ - pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - and RT0, RT0, d; \ - add RT1, RT1, RT3; \ - add e, e, RT0; \ - ror b, #(32 - 30); \ - pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \ - add e, e, RT1; - -#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) - -#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - _R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) - -#define R(a,b,c,d,e,f,i) \ - _R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\ - W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) - -#define dummy(...) - - -/* Input expansion macros. 
*/ - -/********* Precalc macros for rounds 0-15 *************************************/ - -#define W_PRECALC_00_15() \ - add RWK, sp, #(WK_offs(0)); \ - \ - vld1.32 {W0, W7}, [RDATA]!; \ - ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \ - vld1.32 {W6, W5}, [RDATA]!; \ - vadd.u32 tmp0, W0, curK; \ - ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \ - ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \ - vadd.u32 tmp1, W7, curK; \ - ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \ - vadd.u32 tmp2, W6, curK; \ - vst1.32 {tmp0, tmp1}, [RWK]!; \ - vadd.u32 tmp3, W5, curK; \ - vst1.32 {tmp2, tmp3}, [RWK]; \ - -#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vld1.32 {W0, W7}, [RDATA]!; \ - -#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - add RWK, sp, #(WK_offs(0)); \ - -#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \ - -#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vld1.32 {W6, W5}, [RDATA]!; \ - -#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vadd.u32 tmp0, W0, curK; \ - -#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \ - -#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \ - -#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vadd.u32 tmp1, W7, curK; \ - -#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \ - -#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vadd.u32 tmp2, W6, curK; \ - -#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vst1.32 {tmp0, tmp1}, [RWK]!; \ - -#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vadd.u32 tmp3, W5, curK; \ - -#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vst1.32 {tmp2, tmp3}, [RWK]; \ - - -/********* Precalc macros for rounds 16-31 ************************************/ - -#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor tmp0, tmp0; \ - vext.8 W, W_m16, W_m12, #8; \ - -#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - add RWK, sp, #(WK_offs(i)); \ - vext.8 tmp0, W_m04, tmp0, #4; \ - -#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor tmp0, tmp0, W_m16; \ - veor.32 W, W, W_m08; \ - -#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor tmp1, tmp1; \ - veor W, W, tmp0; \ - -#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vshl.u32 tmp0, W, #1; \ - -#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vext.8 tmp1, tmp1, W, #(16-12); \ - vshr.u32 W, W, #31; \ - -#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vorr tmp0, tmp0, W; \ - vshr.u32 W, tmp1, #30; \ - -#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vshl.u32 tmp1, tmp1, #2; \ - -#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor tmp0, tmp0, W; \ - -#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor W, tmp0, tmp1; \ - -#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vadd.u32 tmp0, W, curK; \ - -#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - 
vst1.32 {tmp0}, [RWK]; - - -/********* Precalc macros for rounds 32-79 ************************************/ - -#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor W, W_m28; \ - -#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vext.8 tmp0, W_m08, W_m04, #8; \ - -#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor W, W_m16; \ - -#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - veor W, tmp0; \ - -#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - add RWK, sp, #(WK_offs(i&~3)); \ - -#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vshl.u32 tmp1, W, #2; \ - -#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vshr.u32 tmp0, W, #30; \ - -#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vorr W, tmp0, tmp1; \ - -#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vadd.u32 tmp0, W, curK; \ - -#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vst1.32 {tmp0}, [RWK]; - - -/* - * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. - * - * unsigned int - * sha1_transform_neon (void *ctx, const unsigned char *data, - * unsigned int nblks) - */ -.align 3 -ENTRY(sha1_transform_neon) - /* input: - * r0: ctx, CTX - * r1: data (64*nblks bytes) - * r2: nblks - */ - - cmp RNBLKS, #0; - beq .Ldo_nothing; - - push {r4-r12, lr}; - /*vpush {q4-q7};*/ - - adr RT3, .LK_VEC; - - mov ROLDSTACK, sp; - - /* Align stack. */ - sub RT0, sp, #(16*4); - and RT0, #(~(16-1)); - mov sp, RT0; - - vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */ - - /* Get the values of the chaining variables. */ - ldm RSTATE, {_a-_e}; - - vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */ - -#undef curK -#define curK qK1 - /* Precalc 0-15. */ - W_PRECALC_00_15(); - -.Loop: - /* Transform 0-15 + Precalc 16-31. 
*/ - _R( _a, _b, _c, _d, _e, F1, 0, - WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16, - W4, W5, W6, W7, W0, _, _, _ ); - _R( _e, _a, _b, _c, _d, F1, 1, - WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16, - W4, W5, W6, W7, W0, _, _, _ ); - _R( _d, _e, _a, _b, _c, F1, 2, - WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16, - W4, W5, W6, W7, W0, _, _, _ ); - _R( _c, _d, _e, _a, _b, F1, 3, - WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16, - W4, W5, W6, W7, W0, _, _, _ ); - -#undef curK -#define curK qK2 - _R( _b, _c, _d, _e, _a, F1, 4, - WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20, - W3, W4, W5, W6, W7, _, _, _ ); - _R( _a, _b, _c, _d, _e, F1, 5, - WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20, - W3, W4, W5, W6, W7, _, _, _ ); - _R( _e, _a, _b, _c, _d, F1, 6, - WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20, - W3, W4, W5, W6, W7, _, _, _ ); - _R( _d, _e, _a, _b, _c, F1, 7, - WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20, - W3, W4, W5, W6, W7, _, _, _ ); - - _R( _c, _d, _e, _a, _b, F1, 8, - WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24, - W2, W3, W4, W5, W6, _, _, _ ); - _R( _b, _c, _d, _e, _a, F1, 9, - WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24, - W2, W3, W4, W5, W6, _, _, _ ); - _R( _a, _b, _c, _d, _e, F1, 10, - WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24, - W2, W3, W4, W5, W6, _, _, _ ); - _R( _e, _a, _b, _c, _d, F1, 11, - WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24, - W2, W3, W4, W5, W6, _, _, _ ); - - _R( _d, _e, _a, _b, _c, F1, 12, - WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28, - W1, W2, W3, W4, W5, _, _, _ ); - _R( _c, _d, _e, _a, _b, F1, 13, - WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28, - W1, W2, W3, W4, W5, _, _, _ ); - _R( _b, _c, _d, _e, _a, F1, 14, - WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28, - W1, W2, W3, W4, W5, _, _, _ ); - _R( _a, _b, _c, _d, _e, F1, 15, - WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28, - W1, W2, W3, W4, W5, _, _, _ ); - - /* Transform 16-63 + Precalc 32-79. 
*/ - _R( _e, _a, _b, _c, _d, F1, 16, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32, - W0, W1, W2, W3, W4, W5, W6, W7); - _R( _d, _e, _a, _b, _c, F1, 17, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32, - W0, W1, W2, W3, W4, W5, W6, W7); - _R( _c, _d, _e, _a, _b, F1, 18, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32, - W0, W1, W2, W3, W4, W5, W6, W7); - _R( _b, _c, _d, _e, _a, F1, 19, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32, - W0, W1, W2, W3, W4, W5, W6, W7); - - _R( _a, _b, _c, _d, _e, F2, 20, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36, - W7, W0, W1, W2, W3, W4, W5, W6); - _R( _e, _a, _b, _c, _d, F2, 21, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36, - W7, W0, W1, W2, W3, W4, W5, W6); - _R( _d, _e, _a, _b, _c, F2, 22, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36, - W7, W0, W1, W2, W3, W4, W5, W6); - _R( _c, _d, _e, _a, _b, F2, 23, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36, - W7, W0, W1, W2, W3, W4, W5, W6); - -#undef curK -#define curK qK3 - _R( _b, _c, _d, _e, _a, F2, 24, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40, - W6, W7, W0, W1, W2, W3, W4, W5); - _R( _a, _b, _c, _d, _e, F2, 25, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40, - W6, W7, W0, W1, W2, W3, W4, W5); - _R( _e, _a, _b, _c, _d, F2, 26, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40, - W6, W7, W0, W1, W2, W3, W4, W5); - _R( _d, _e, _a, _b, _c, F2, 27, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40, - W6, W7, W0, W1, W2, W3, W4, W5); - - _R( _c, _d, _e, _a, _b, F2, 28, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44, - W5, W6, W7, W0, W1, W2, W3, W4); - _R( _b, _c, _d, _e, _a, F2, 29, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44, - W5, W6, W7, W0, W1, W2, W3, W4); - _R( _a, _b, _c, _d, _e, F2, 30, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44, - W5, W6, W7, W0, W1, W2, W3, W4); - _R( _e, _a, _b, _c, _d, F2, 31, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44, - W5, W6, W7, W0, W1, W2, W3, W4); - - _R( _d, _e, _a, _b, _c, F2, 32, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48, - W4, W5, W6, W7, W0, W1, W2, W3); - _R( _c, _d, _e, _a, _b, F2, 33, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48, - W4, W5, W6, W7, W0, W1, W2, W3); - _R( _b, _c, _d, _e, _a, F2, 34, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48, - W4, W5, W6, W7, W0, W1, W2, W3); - _R( _a, _b, _c, _d, _e, F2, 35, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48, - W4, W5, W6, W7, W0, W1, W2, W3); - - _R( _e, _a, _b, _c, _d, F2, 36, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52, - W3, W4, W5, W6, W7, W0, W1, W2); - _R( _d, _e, _a, _b, _c, F2, 37, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52, - W3, W4, W5, W6, W7, W0, W1, W2); - _R( _c, _d, _e, _a, _b, F2, 38, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52, - W3, W4, W5, W6, W7, W0, W1, W2); - _R( _b, _c, _d, _e, _a, F2, 39, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52, - W3, W4, W5, W6, W7, W0, W1, W2); - - _R( _a, _b, _c, _d, _e, F3, 40, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56, - W2, W3, W4, W5, W6, W7, W0, W1); - _R( _e, _a, _b, _c, _d, F3, 41, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56, - W2, W3, W4, W5, W6, W7, W0, W1); - _R( _d, _e, _a, _b, _c, F3, 42, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56, - W2, W3, W4, W5, W6, W7, W0, W1); - _R( _c, _d, _e, _a, _b, F3, 43, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56, - W2, W3, W4, W5, W6, W7, W0, W1); - -#undef curK -#define curK qK4 - _R( _b, _c, _d, 
_e, _a, F3, 44, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60, - W1, W2, W3, W4, W5, W6, W7, W0); - _R( _a, _b, _c, _d, _e, F3, 45, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60, - W1, W2, W3, W4, W5, W6, W7, W0); - _R( _e, _a, _b, _c, _d, F3, 46, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60, - W1, W2, W3, W4, W5, W6, W7, W0); - _R( _d, _e, _a, _b, _c, F3, 47, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60, - W1, W2, W3, W4, W5, W6, W7, W0); - - _R( _c, _d, _e, _a, _b, F3, 48, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64, - W0, W1, W2, W3, W4, W5, W6, W7); - _R( _b, _c, _d, _e, _a, F3, 49, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64, - W0, W1, W2, W3, W4, W5, W6, W7); - _R( _a, _b, _c, _d, _e, F3, 50, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64, - W0, W1, W2, W3, W4, W5, W6, W7); - _R( _e, _a, _b, _c, _d, F3, 51, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64, - W0, W1, W2, W3, W4, W5, W6, W7); - - _R( _d, _e, _a, _b, _c, F3, 52, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68, - W7, W0, W1, W2, W3, W4, W5, W6); - _R( _c, _d, _e, _a, _b, F3, 53, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68, - W7, W0, W1, W2, W3, W4, W5, W6); - _R( _b, _c, _d, _e, _a, F3, 54, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68, - W7, W0, W1, W2, W3, W4, W5, W6); - _R( _a, _b, _c, _d, _e, F3, 55, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68, - W7, W0, W1, W2, W3, W4, W5, W6); - - _R( _e, _a, _b, _c, _d, F3, 56, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72, - W6, W7, W0, W1, W2, W3, W4, W5); - _R( _d, _e, _a, _b, _c, F3, 57, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72, - W6, W7, W0, W1, W2, W3, W4, W5); - _R( _c, _d, _e, _a, _b, F3, 58, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72, - W6, W7, W0, W1, W2, W3, W4, W5); - _R( _b, _c, _d, _e, _a, F3, 59, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72, - W6, W7, W0, W1, W2, W3, W4, W5); - - subs RNBLKS, #1; - - _R( _a, _b, _c, _d, _e, F4, 60, - WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76, - W5, W6, W7, W0, W1, W2, W3, W4); - _R( _e, _a, _b, _c, _d, F4, 61, - WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76, - W5, W6, W7, W0, W1, W2, W3, W4); - _R( _d, _e, _a, _b, _c, F4, 62, - WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76, - W5, W6, W7, W0, W1, W2, W3, W4); - _R( _c, _d, _e, _a, _b, F4, 63, - WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76, - W5, W6, W7, W0, W1, W2, W3, W4); - - beq .Lend; - - /* Transform 64-79 + Precalc 0-15 of next block. 
*/ -#undef curK -#define curK qK1 - _R( _b, _c, _d, _e, _a, F4, 64, - WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _a, _b, _c, _d, _e, F4, 65, - WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _e, _a, _b, _c, _d, F4, 66, - WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _d, _e, _a, _b, _c, F4, 67, - WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - - _R( _c, _d, _e, _a, _b, F4, 68, - dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _b, _c, _d, _e, _a, F4, 69, - dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _a, _b, _c, _d, _e, F4, 70, - WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _e, _a, _b, _c, _d, F4, 71, - WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - - _R( _d, _e, _a, _b, _c, F4, 72, - dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _c, _d, _e, _a, _b, F4, 73, - dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _b, _c, _d, _e, _a, F4, 74, - WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _a, _b, _c, _d, _e, F4, 75, - WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - - _R( _e, _a, _b, _c, _d, F4, 76, - WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _d, _e, _a, _b, _c, F4, 77, - WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _c, _d, _e, _a, _b, F4, 78, - WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ ); - _R( _b, _c, _d, _e, _a, F4, 79, - WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ ); - - /* Update the chaining variables. */ - ldm RSTATE, {RT0-RT3}; - add _a, RT0; - ldr RT0, [RSTATE, #state_h4]; - add _b, RT1; - add _c, RT2; - add _d, RT3; - add _e, RT0; - stm RSTATE, {_a-_e}; - - b .Loop; - -.Lend: - /* Transform 64-79 */ - R( _b, _c, _d, _e, _a, F4, 64 ); - R( _a, _b, _c, _d, _e, F4, 65 ); - R( _e, _a, _b, _c, _d, F4, 66 ); - R( _d, _e, _a, _b, _c, F4, 67 ); - R( _c, _d, _e, _a, _b, F4, 68 ); - R( _b, _c, _d, _e, _a, F4, 69 ); - R( _a, _b, _c, _d, _e, F4, 70 ); - R( _e, _a, _b, _c, _d, F4, 71 ); - R( _d, _e, _a, _b, _c, F4, 72 ); - R( _c, _d, _e, _a, _b, F4, 73 ); - R( _b, _c, _d, _e, _a, F4, 74 ); - R( _a, _b, _c, _d, _e, F4, 75 ); - R( _e, _a, _b, _c, _d, F4, 76 ); - R( _d, _e, _a, _b, _c, F4, 77 ); - R( _c, _d, _e, _a, _b, F4, 78 ); - R( _b, _c, _d, _e, _a, F4, 79 ); - - mov sp, ROLDSTACK; - - /* Update the chaining variables. */ - ldm RSTATE, {RT0-RT3}; - add _a, RT0; - ldr RT0, [RSTATE, #state_h4]; - add _b, RT1; - add _c, RT2; - add _d, RT3; - /*vpop {q4-q7};*/ - add _e, RT0; - stm RSTATE, {_a-_e}; - - pop {r4-r12, pc}; - -.Ldo_nothing: - bx lr -ENDPROC(sha1_transform_neon) diff --git a/arch/arm/crypto/sha1-ce-core.S b/arch/arm/crypto/sha1-ce-core.S deleted file mode 100644 index 8a702e051738..000000000000 --- a/arch/arm/crypto/sha1-ce-core.S +++ /dev/null @@ -1,123 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions - * - * Copyright (C) 2015 Linaro Ltd. 
- * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org> - */ - -#include <linux/linkage.h> -#include <asm/assembler.h> - - .text - .arch armv8-a - .fpu crypto-neon-fp-armv8 - - k0 .req q0 - k1 .req q1 - k2 .req q2 - k3 .req q3 - - ta0 .req q4 - ta1 .req q5 - tb0 .req q5 - tb1 .req q4 - - dga .req q6 - dgb .req q7 - dgbs .req s28 - - dg0 .req q12 - dg1a0 .req q13 - dg1a1 .req q14 - dg1b0 .req q14 - dg1b1 .req q13 - - .macro add_only, op, ev, rc, s0, dg1 - .ifnb \s0 - vadd.u32 tb\ev, q\s0, \rc - .endif - sha1h.32 dg1b\ev, dg0 - .ifb \dg1 - sha1\op\().32 dg0, dg1a\ev, ta\ev - .else - sha1\op\().32 dg0, \dg1, ta\ev - .endif - .endm - - .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1 - sha1su0.32 q\s0, q\s1, q\s2 - add_only \op, \ev, \rc, \s1, \dg1 - sha1su1.32 q\s0, q\s3 - .endm - - .align 6 -.Lsha1_rcon: - .word 0x5a827999, 0x5a827999, 0x5a827999, 0x5a827999 - .word 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1 - .word 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc - .word 0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6 - - /* - * void sha1_ce_transform(struct sha1_state *sst, u8 const *src, - * int blocks); - */ -ENTRY(sha1_ce_transform) - /* load round constants */ - adr ip, .Lsha1_rcon - vld1.32 {k0-k1}, [ip, :128]! - vld1.32 {k2-k3}, [ip, :128] - - /* load state */ - vld1.32 {dga}, [r0] - vldr dgbs, [r0, #16] - - /* load input */ -0: vld1.32 {q8-q9}, [r1]! - vld1.32 {q10-q11}, [r1]! - subs r2, r2, #1 - -#ifndef CONFIG_CPU_BIG_ENDIAN - vrev32.8 q8, q8 - vrev32.8 q9, q9 - vrev32.8 q10, q10 - vrev32.8 q11, q11 -#endif - - vadd.u32 ta0, q8, k0 - vmov dg0, dga - - add_update c, 0, k0, 8, 9, 10, 11, dgb - add_update c, 1, k0, 9, 10, 11, 8 - add_update c, 0, k0, 10, 11, 8, 9 - add_update c, 1, k0, 11, 8, 9, 10 - add_update c, 0, k1, 8, 9, 10, 11 - - add_update p, 1, k1, 9, 10, 11, 8 - add_update p, 0, k1, 10, 11, 8, 9 - add_update p, 1, k1, 11, 8, 9, 10 - add_update p, 0, k1, 8, 9, 10, 11 - add_update p, 1, k2, 9, 10, 11, 8 - - add_update m, 0, k2, 10, 11, 8, 9 - add_update m, 1, k2, 11, 8, 9, 10 - add_update m, 0, k2, 8, 9, 10, 11 - add_update m, 1, k2, 9, 10, 11, 8 - add_update m, 0, k3, 10, 11, 8, 9 - - add_update p, 1, k3, 11, 8, 9, 10 - add_only p, 0, k3, 9 - add_only p, 1, k3, 10 - add_only p, 0, k3, 11 - add_only p, 1 - - /* update state */ - vadd.u32 dga, dga, dg0 - vadd.u32 dgb, dgb, dg1a0 - bne 0b - - /* store new state */ - vst1.32 {dga}, [r0] - vstr dgbs, [r0, #16] - bx lr -ENDPROC(sha1_ce_transform) diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c deleted file mode 100644 index fac07a4799de..000000000000 --- a/arch/arm/crypto/sha1-ce-glue.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions - * - * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> - */ - -#include <asm/neon.h> -#include <crypto/internal/hash.h> -#include <crypto/sha1.h> -#include <crypto/sha1_base.h> -#include <linux/cpufeature.h> -#include <linux/kernel.h> -#include <linux/module.h> - -MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); - -asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src, - int blocks); - -static int sha1_ce_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - int remain; - - kernel_neon_begin(); - remain = sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform); - kernel_neon_end(); - - return remain; -} - 
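The `remain` value returned above is the contract of the block-only hash update: the helper consumes whole 64-byte blocks and hands back the count of trailing bytes it did not touch, which the hash core buffers until finup; kernel_neon_begin()/kernel_neon_end() bracket the call because NEON register state is not otherwise preserved for kernel-mode code. A hedged sketch of that loop in terms of the illustrative sha1_block() above (again, not the kernel API):

/* Illustrative only: process complete 64-byte blocks, return the
 * unprocessed remainder for the caller to buffer until finup. */
static size_t sha1_do_update_blocks(uint32_t state[5], const uint8_t *data,
				    size_t len)
{
	while (len >= 64) {		/* complete blocks only */
		sha1_block(state, data);
		data += 64;
		len -= 64;
	}
	return len;
}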
-static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - kernel_neon_begin(); - sha1_base_do_finup(desc, data, len, sha1_ce_transform); - kernel_neon_end(); - - return sha1_base_finish(desc, out); -} - -static struct shash_alg alg = { - .init = sha1_base_init, - .update = sha1_ce_update, - .finup = sha1_ce_finup, - .descsize = SHA1_STATE_SIZE, - .digestsize = SHA1_DIGEST_SIZE, - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-ce", - .cra_priority = 200, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init sha1_ce_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit sha1_ce_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_cpu_feature_match(SHA1, sha1_ce_mod_init); -module_exit(sha1_ce_mod_fini); diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c deleted file mode 100644 index 255da00c7d98..000000000000 --- a/arch/arm/crypto/sha1_glue.c +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * Glue code for the SHA1 Secure Hash Algorithm assembler implementation - * - * This file is based on sha1_generic.c and sha1_ssse3_glue.c - * - * Copyright (c) Alan Smithee. - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> - * Copyright (c) Mathias Krause <minipli@googlemail.com> - */ - -#include <crypto/internal/hash.h> -#include <crypto/sha1.h> -#include <crypto/sha1_base.h> -#include <linux/kernel.h> -#include <linux/module.h> - -asmlinkage void sha1_block_data_order(struct sha1_state *digest, - const u8 *data, int rounds); - -static int sha1_update_arm(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - /* make sure signature matches sha1_block_fn() */ - BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); - - return sha1_base_do_update_blocks(desc, data, len, - sha1_block_data_order); -} - -static int sha1_finup_arm(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha1_base_do_finup(desc, data, len, sha1_block_data_order); - return sha1_base_finish(desc, out); -} - -static struct shash_alg alg = { - .digestsize = SHA1_DIGEST_SIZE, - .init = sha1_base_init, - .update = sha1_update_arm, - .finup = sha1_finup_arm, - .descsize = SHA1_STATE_SIZE, - .base = { - .cra_name = "sha1", - .cra_driver_name= "sha1-asm", - .cra_priority = 150, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - - -static int __init sha1_mod_init(void) -{ - return crypto_register_shash(&alg); -} - - -static void __exit sha1_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - - -module_init(sha1_mod_init); -module_exit(sha1_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)"); -MODULE_ALIAS_CRYPTO("sha1"); -MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>"); diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c deleted file mode 100644 index d321850f22a6..000000000000 --- a/arch/arm/crypto/sha1_neon_glue.c +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using - * ARM NEON instructions. 
- * - * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> - * - * This file is based on sha1_generic.c and sha1_ssse3_glue.c: - * Copyright (c) Alan Smithee. - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> - * Copyright (c) Mathias Krause <minipli@googlemail.com> - * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com> - */ - -#include <asm/neon.h> -#include <crypto/internal/hash.h> -#include <crypto/sha1.h> -#include <crypto/sha1_base.h> -#include <linux/kernel.h> -#include <linux/module.h> - -asmlinkage void sha1_transform_neon(struct sha1_state *state_h, - const u8 *data, int rounds); - -static int sha1_neon_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - int remain; - - kernel_neon_begin(); - remain = sha1_base_do_update_blocks(desc, data, len, - sha1_transform_neon); - kernel_neon_end(); - - return remain; -} - -static int sha1_neon_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - kernel_neon_begin(); - sha1_base_do_finup(desc, data, len, sha1_transform_neon); - kernel_neon_end(); - - return sha1_base_finish(desc, out); -} - -static struct shash_alg alg = { - .digestsize = SHA1_DIGEST_SIZE, - .init = sha1_base_init, - .update = sha1_neon_update, - .finup = sha1_neon_finup, - .descsize = SHA1_STATE_SIZE, - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-neon", - .cra_priority = 250, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init sha1_neon_mod_init(void) -{ - if (!cpu_has_neon()) - return -ENODEV; - - return crypto_register_shash(&alg); -} - -static void __exit sha1_neon_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_init(sha1_neon_mod_init); -module_exit(sha1_neon_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated"); -MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl deleted file mode 100644 index 2fc3516912fa..000000000000 --- a/arch/arm/crypto/sha512-armv4.pl +++ /dev/null @@ -1,657 +0,0 @@ -#!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0 - -# This code is taken from the OpenSSL project but the author (Andy Polyakov) -# has relicensed it under the GPLv2. Therefore this program is free software; -# you can redistribute it and/or modify it under the terms of the GNU General -# Public License version 2 as published by the Free Software Foundation. -# -# The original headers, including the original license headers, are -# included below for completeness. - -# ==================================================================== -# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL -# project. The module is, however, dual licensed under OpenSSL and -# CRYPTOGAMS licenses depending on where you obtain it. For further -# details see https://www.openssl.org/~appro/cryptogams/. -# ==================================================================== - -# SHA512 block procedure for ARMv4. September 2007. - -# This code is ~4.5 (four and a half) times faster than code generated -# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue -# Xscale PXA250 core]. -# -# July 2010. -# -# Rescheduling for dual-issue pipeline resulted in 6% improvement on -# Cortex A8 core and ~40 cycles per processed byte. - -# February 2011. 
-# -# Profiler-assisted and platform-specific optimization resulted in 7% -# improvement on Coxtex A8 core and ~38 cycles per byte. - -# March 2011. -# -# Add NEON implementation. On Cortex A8 it was measured to process -# one byte in 23.3 cycles or ~60% faster than integer-only code. - -# August 2012. -# -# Improve NEON performance by 12% on Snapdragon S4. In absolute -# terms it's 22.6 cycles per byte, which is disappointing result. -# Technical writers asserted that 3-way S4 pipeline can sustain -# multiple NEON instructions per cycle, but dual NEON issue could -# not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html -# for further details. On side note Cortex-A15 processes one byte in -# 16 cycles. - -# Byte order [in]dependence. ========================================= -# -# Originally caller was expected to maintain specific *dword* order in -# h[0-7], namely with most significant dword at *lower* address, which -# was reflected in below two parameters as 0 and 4. Now caller is -# expected to maintain native byte order for whole 64-bit values. -$hi="HI"; -$lo="LO"; -# ==================================================================== - -while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} -open STDOUT,">$output"; - -$ctx="r0"; # parameter block -$inp="r1"; -$len="r2"; - -$Tlo="r3"; -$Thi="r4"; -$Alo="r5"; -$Ahi="r6"; -$Elo="r7"; -$Ehi="r8"; -$t0="r9"; -$t1="r10"; -$t2="r11"; -$t3="r12"; -############ r13 is stack pointer -$Ktbl="r14"; -############ r15 is program counter - -$Aoff=8*0; -$Boff=8*1; -$Coff=8*2; -$Doff=8*3; -$Eoff=8*4; -$Foff=8*5; -$Goff=8*6; -$Hoff=8*7; -$Xoff=8*8; - -sub BODY_00_15() { -my $magic = shift; -$code.=<<___; - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov $t0,$Elo,lsr#14 - str $Tlo,[sp,#$Xoff+0] - mov $t1,$Ehi,lsr#14 - str $Thi,[sp,#$Xoff+4] - eor $t0,$t0,$Ehi,lsl#18 - ldr $t2,[sp,#$Hoff+0] @ h.lo - eor $t1,$t1,$Elo,lsl#18 - ldr $t3,[sp,#$Hoff+4] @ h.hi - eor $t0,$t0,$Elo,lsr#18 - eor $t1,$t1,$Ehi,lsr#18 - eor $t0,$t0,$Ehi,lsl#14 - eor $t1,$t1,$Elo,lsl#14 - eor $t0,$t0,$Ehi,lsr#9 - eor $t1,$t1,$Elo,lsr#9 - eor $t0,$t0,$Elo,lsl#23 - eor $t1,$t1,$Ehi,lsl#23 @ Sigma1(e) - adds $Tlo,$Tlo,$t0 - ldr $t0,[sp,#$Foff+0] @ f.lo - adc $Thi,$Thi,$t1 @ T += Sigma1(e) - ldr $t1,[sp,#$Foff+4] @ f.hi - adds $Tlo,$Tlo,$t2 - ldr $t2,[sp,#$Goff+0] @ g.lo - adc $Thi,$Thi,$t3 @ T += h - ldr $t3,[sp,#$Goff+4] @ g.hi - - eor $t0,$t0,$t2 - str $Elo,[sp,#$Eoff+0] - eor $t1,$t1,$t3 - str $Ehi,[sp,#$Eoff+4] - and $t0,$t0,$Elo - str $Alo,[sp,#$Aoff+0] - and $t1,$t1,$Ehi - str $Ahi,[sp,#$Aoff+4] - eor $t0,$t0,$t2 - ldr $t2,[$Ktbl,#$lo] @ K[i].lo - eor $t1,$t1,$t3 @ Ch(e,f,g) - ldr $t3,[$Ktbl,#$hi] @ K[i].hi - - adds $Tlo,$Tlo,$t0 - ldr $Elo,[sp,#$Doff+0] @ d.lo - adc $Thi,$Thi,$t1 @ T += Ch(e,f,g) - ldr $Ehi,[sp,#$Doff+4] @ d.hi - adds $Tlo,$Tlo,$t2 - and $t0,$t2,#0xff - adc $Thi,$Thi,$t3 @ T += K[i] - adds $Elo,$Elo,$Tlo - ldr $t2,[sp,#$Boff+0] @ b.lo - adc $Ehi,$Ehi,$Thi @ d += T - teq $t0,#$magic - - ldr $t3,[sp,#$Coff+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq $Ktbl,$Ktbl,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 - mov $t0,$Alo,lsr#28 - mov $t1,$Ahi,lsr#28 - eor $t0,$t0,$Ahi,lsl#4 - eor $t1,$t1,$Alo,lsl#4 - eor $t0,$t0,$Ahi,lsr#2 - eor $t1,$t1,$Alo,lsr#2 - eor 
$t0,$t0,$Alo,lsl#30 - eor $t1,$t1,$Ahi,lsl#30 - eor $t0,$t0,$Ahi,lsr#7 - eor $t1,$t1,$Alo,lsr#7 - eor $t0,$t0,$Alo,lsl#25 - eor $t1,$t1,$Ahi,lsl#25 @ Sigma0(a) - adds $Tlo,$Tlo,$t0 - and $t0,$Alo,$t2 - adc $Thi,$Thi,$t1 @ T += Sigma0(a) - - ldr $t1,[sp,#$Boff+4] @ b.hi - orr $Alo,$Alo,$t2 - ldr $t2,[sp,#$Coff+4] @ c.hi - and $Alo,$Alo,$t3 - and $t3,$Ahi,$t1 - orr $Ahi,$Ahi,$t1 - orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo - and $Ahi,$Ahi,$t2 - adds $Alo,$Alo,$Tlo - orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc $Ahi,$Ahi,$Thi @ h += T - tst $Ktbl,#1 - add $Ktbl,$Ktbl,#8 -___ -} -$code=<<___; -#ifndef __KERNEL__ -# include "arm_arch.h" -# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} -# define VFP_ABI_POP vldmia sp!,{d8-d15} -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -# define VFP_ABI_PUSH -# define VFP_ABI_POP -#endif - -#ifdef __ARMEL__ -# define LO 0 -# define HI 4 -# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 -#else -# define HI 0 -# define LO 4 -# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 -#endif - -.text -#if __ARM_ARCH__<7 -.code 32 -#else -.syntax unified -# ifdef __thumb2__ -.thumb -# else -.code 32 -# endif -#endif - -.type K512,%object -.align 5 -K512: -WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) -WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) -WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) -WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) -WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) -WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) -WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) -WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) -WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) -WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) -WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) -WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) -WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) -WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) -WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) -WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) -WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) -WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) -WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) -WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) -WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) -WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) -WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) -WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) -WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) -WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) -WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) -WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) -WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) -WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) -WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) -WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) -WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) -WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) -WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) -WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) -WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) -WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) -WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) -WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) -.size K512,.-K512 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha512_block_data_order -.skip 32-4 -#else -.skip 32 
-#endif - -.global sha512_block_data_order -.type sha512_block_data_order,%function -sha512_block_data_order: -.Lsha512_block_data_order: -#if __ARM_ARCH__<7 - sub r3,pc,#8 @ sha512_block_data_order -#else - adr r3,.Lsha512_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#1 - bne .LNEON -#endif - add $len,$inp,$len,lsl#7 @ len to point at the end of inp - stmdb sp!,{r4-r12,lr} - sub $Ktbl,r3,#672 @ K512 - sub sp,sp,#9*8 - - ldr $Elo,[$ctx,#$Eoff+$lo] - ldr $Ehi,[$ctx,#$Eoff+$hi] - ldr $t0, [$ctx,#$Goff+$lo] - ldr $t1, [$ctx,#$Goff+$hi] - ldr $t2, [$ctx,#$Hoff+$lo] - ldr $t3, [$ctx,#$Hoff+$hi] -.Loop: - str $t0, [sp,#$Goff+0] - str $t1, [sp,#$Goff+4] - str $t2, [sp,#$Hoff+0] - str $t3, [sp,#$Hoff+4] - ldr $Alo,[$ctx,#$Aoff+$lo] - ldr $Ahi,[$ctx,#$Aoff+$hi] - ldr $Tlo,[$ctx,#$Boff+$lo] - ldr $Thi,[$ctx,#$Boff+$hi] - ldr $t0, [$ctx,#$Coff+$lo] - ldr $t1, [$ctx,#$Coff+$hi] - ldr $t2, [$ctx,#$Doff+$lo] - ldr $t3, [$ctx,#$Doff+$hi] - str $Tlo,[sp,#$Boff+0] - str $Thi,[sp,#$Boff+4] - str $t0, [sp,#$Coff+0] - str $t1, [sp,#$Coff+4] - str $t2, [sp,#$Doff+0] - str $t3, [sp,#$Doff+4] - ldr $Tlo,[$ctx,#$Foff+$lo] - ldr $Thi,[$ctx,#$Foff+$hi] - str $Tlo,[sp,#$Foff+0] - str $Thi,[sp,#$Foff+4] - -.L00_15: -#if __ARM_ARCH__<7 - ldrb $Tlo,[$inp,#7] - ldrb $t0, [$inp,#6] - ldrb $t1, [$inp,#5] - ldrb $t2, [$inp,#4] - ldrb $Thi,[$inp,#3] - ldrb $t3, [$inp,#2] - orr $Tlo,$Tlo,$t0,lsl#8 - ldrb $t0, [$inp,#1] - orr $Tlo,$Tlo,$t1,lsl#16 - ldrb $t1, [$inp],#8 - orr $Tlo,$Tlo,$t2,lsl#24 - orr $Thi,$Thi,$t3,lsl#8 - orr $Thi,$Thi,$t0,lsl#16 - orr $Thi,$Thi,$t1,lsl#24 -#else - ldr $Tlo,[$inp,#4] - ldr $Thi,[$inp],#8 -#ifdef __ARMEL__ - rev $Tlo,$Tlo - rev $Thi,$Thi -#endif -#endif -___ - &BODY_00_15(0x94); -$code.=<<___; - tst $Ktbl,#1 - beq .L00_15 - ldr $t0,[sp,#`$Xoff+8*(16-1)`+0] - ldr $t1,[sp,#`$Xoff+8*(16-1)`+4] - bic $Ktbl,$Ktbl,#1 -.L16_79: - @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) - @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 - @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 - mov $Tlo,$t0,lsr#1 - ldr $t2,[sp,#`$Xoff+8*(16-14)`+0] - mov $Thi,$t1,lsr#1 - ldr $t3,[sp,#`$Xoff+8*(16-14)`+4] - eor $Tlo,$Tlo,$t1,lsl#31 - eor $Thi,$Thi,$t0,lsl#31 - eor $Tlo,$Tlo,$t0,lsr#8 - eor $Thi,$Thi,$t1,lsr#8 - eor $Tlo,$Tlo,$t1,lsl#24 - eor $Thi,$Thi,$t0,lsl#24 - eor $Tlo,$Tlo,$t0,lsr#7 - eor $Thi,$Thi,$t1,lsr#7 - eor $Tlo,$Tlo,$t1,lsl#25 - - @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) - @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 - @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 - mov $t0,$t2,lsr#19 - mov $t1,$t3,lsr#19 - eor $t0,$t0,$t3,lsl#13 - eor $t1,$t1,$t2,lsl#13 - eor $t0,$t0,$t3,lsr#29 - eor $t1,$t1,$t2,lsr#29 - eor $t0,$t0,$t2,lsl#3 - eor $t1,$t1,$t3,lsl#3 - eor $t0,$t0,$t2,lsr#6 - eor $t1,$t1,$t3,lsr#6 - ldr $t2,[sp,#`$Xoff+8*(16-9)`+0] - eor $t0,$t0,$t3,lsl#26 - - ldr $t3,[sp,#`$Xoff+8*(16-9)`+4] - adds $Tlo,$Tlo,$t0 - ldr $t0,[sp,#`$Xoff+8*16`+0] - adc $Thi,$Thi,$t1 - - ldr $t1,[sp,#`$Xoff+8*16`+4] - adds $Tlo,$Tlo,$t2 - adc $Thi,$Thi,$t3 - adds $Tlo,$Tlo,$t0 - adc $Thi,$Thi,$t1 -___ - &BODY_00_15(0x17); -$code.=<<___; -#if __ARM_ARCH__>=7 - ittt eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq $t0,[sp,#`$Xoff+8*(16-1)`+0] - ldreq $t1,[sp,#`$Xoff+8*(16-1)`+4] - beq .L16_79 - bic $Ktbl,$Ktbl,#1 - - ldr $Tlo,[sp,#$Boff+0] - ldr $Thi,[sp,#$Boff+4] - ldr $t0, [$ctx,#$Aoff+$lo] - ldr $t1, [$ctx,#$Aoff+$hi] - ldr $t2, [$ctx,#$Boff+$lo] - ldr $t3, [$ctx,#$Boff+$hi] - adds $t0,$Alo,$t0 - str $t0, 
[$ctx,#$Aoff+$lo] - adc $t1,$Ahi,$t1 - str $t1, [$ctx,#$Aoff+$hi] - adds $t2,$Tlo,$t2 - str $t2, [$ctx,#$Boff+$lo] - adc $t3,$Thi,$t3 - str $t3, [$ctx,#$Boff+$hi] - - ldr $Alo,[sp,#$Coff+0] - ldr $Ahi,[sp,#$Coff+4] - ldr $Tlo,[sp,#$Doff+0] - ldr $Thi,[sp,#$Doff+4] - ldr $t0, [$ctx,#$Coff+$lo] - ldr $t1, [$ctx,#$Coff+$hi] - ldr $t2, [$ctx,#$Doff+$lo] - ldr $t3, [$ctx,#$Doff+$hi] - adds $t0,$Alo,$t0 - str $t0, [$ctx,#$Coff+$lo] - adc $t1,$Ahi,$t1 - str $t1, [$ctx,#$Coff+$hi] - adds $t2,$Tlo,$t2 - str $t2, [$ctx,#$Doff+$lo] - adc $t3,$Thi,$t3 - str $t3, [$ctx,#$Doff+$hi] - - ldr $Tlo,[sp,#$Foff+0] - ldr $Thi,[sp,#$Foff+4] - ldr $t0, [$ctx,#$Eoff+$lo] - ldr $t1, [$ctx,#$Eoff+$hi] - ldr $t2, [$ctx,#$Foff+$lo] - ldr $t3, [$ctx,#$Foff+$hi] - adds $Elo,$Elo,$t0 - str $Elo,[$ctx,#$Eoff+$lo] - adc $Ehi,$Ehi,$t1 - str $Ehi,[$ctx,#$Eoff+$hi] - adds $t2,$Tlo,$t2 - str $t2, [$ctx,#$Foff+$lo] - adc $t3,$Thi,$t3 - str $t3, [$ctx,#$Foff+$hi] - - ldr $Alo,[sp,#$Goff+0] - ldr $Ahi,[sp,#$Goff+4] - ldr $Tlo,[sp,#$Hoff+0] - ldr $Thi,[sp,#$Hoff+4] - ldr $t0, [$ctx,#$Goff+$lo] - ldr $t1, [$ctx,#$Goff+$hi] - ldr $t2, [$ctx,#$Hoff+$lo] - ldr $t3, [$ctx,#$Hoff+$hi] - adds $t0,$Alo,$t0 - str $t0, [$ctx,#$Goff+$lo] - adc $t1,$Ahi,$t1 - str $t1, [$ctx,#$Goff+$hi] - adds $t2,$Tlo,$t2 - str $t2, [$ctx,#$Hoff+$lo] - adc $t3,$Thi,$t3 - str $t3, [$ctx,#$Hoff+$hi] - - add sp,sp,#640 - sub $Ktbl,$Ktbl,#640 - - teq $inp,$len - bne .Loop - - add sp,sp,#8*9 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - bx lr @ interoperable with Thumb ISA:-) -#endif -.size sha512_block_data_order,.-sha512_block_data_order -___ - -{ -my @Sigma0=(28,34,39); -my @Sigma1=(14,18,41); -my @sigma0=(1, 8, 7); -my @sigma1=(19,61,6); - -my $Ktbl="r3"; -my $cnt="r12"; # volatile register known as ip, intra-procedure-call scratch - -my @X=map("d$_",(0..15)); -my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23)); - -sub NEON_00_15() { -my $i=shift; -my ($a,$b,$c,$d,$e,$f,$g,$h)=@_; -my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31)); # temps - -$code.=<<___ if ($i<16 || $i&1); - vshr.u64 $t0,$e,#@Sigma1[0] @ $i -#if $i<16 - vld1.64 {@X[$i%16]},[$inp]! @ handles unaligned -#endif - vshr.u64 $t1,$e,#@Sigma1[1] -#if $i>0 - vadd.i64 $a,$Maj @ h+=Maj from the past -#endif - vshr.u64 $t2,$e,#@Sigma1[2] -___ -$code.=<<___; - vld1.64 {$K},[$Ktbl,:64]! 
@ K[i++] - vsli.64 $t0,$e,#`64-@Sigma1[0]` - vsli.64 $t1,$e,#`64-@Sigma1[1]` - vmov $Ch,$e - vsli.64 $t2,$e,#`64-@Sigma1[2]` -#if $i<16 && defined(__ARMEL__) - vrev64.8 @X[$i],@X[$i] -#endif - veor $t1,$t0 - vbsl $Ch,$f,$g @ Ch(e,f,g) - vshr.u64 $t0,$a,#@Sigma0[0] - veor $t2,$t1 @ Sigma1(e) - vadd.i64 $T1,$Ch,$h - vshr.u64 $t1,$a,#@Sigma0[1] - vsli.64 $t0,$a,#`64-@Sigma0[0]` - vadd.i64 $T1,$t2 - vshr.u64 $t2,$a,#@Sigma0[2] - vadd.i64 $K,@X[$i%16] - vsli.64 $t1,$a,#`64-@Sigma0[1]` - veor $Maj,$a,$b - vsli.64 $t2,$a,#`64-@Sigma0[2]` - veor $h,$t0,$t1 - vadd.i64 $T1,$K - vbsl $Maj,$c,$b @ Maj(a,b,c) - veor $h,$t2 @ Sigma0(a) - vadd.i64 $d,$T1 - vadd.i64 $Maj,$T1 - @ vadd.i64 $h,$Maj -___ -} - -sub NEON_16_79() { -my $i=shift; - -if ($i&1) { &NEON_00_15($i,@_); return; } - -# 2x-vectorized, therefore runs every 2nd round -my @X=map("q$_",(0..7)); # view @X as 128-bit vector -my ($t0,$t1,$s0,$s1) = map("q$_",(12..15)); # temps -my ($d0,$d1,$d2) = map("d$_",(24..26)); # temps from NEON_00_15 -my $e=@_[4]; # $e from NEON_00_15 -$i /= 2; -$code.=<<___; - vshr.u64 $t0,@X[($i+7)%8],#@sigma1[0] - vshr.u64 $t1,@X[($i+7)%8],#@sigma1[1] - vadd.i64 @_[0],d30 @ h+=Maj from the past - vshr.u64 $s1,@X[($i+7)%8],#@sigma1[2] - vsli.64 $t0,@X[($i+7)%8],#`64-@sigma1[0]` - vext.8 $s0,@X[$i%8],@X[($i+1)%8],#8 @ X[i+1] - vsli.64 $t1,@X[($i+7)%8],#`64-@sigma1[1]` - veor $s1,$t0 - vshr.u64 $t0,$s0,#@sigma0[0] - veor $s1,$t1 @ sigma1(X[i+14]) - vshr.u64 $t1,$s0,#@sigma0[1] - vadd.i64 @X[$i%8],$s1 - vshr.u64 $s1,$s0,#@sigma0[2] - vsli.64 $t0,$s0,#`64-@sigma0[0]` - vsli.64 $t1,$s0,#`64-@sigma0[1]` - vext.8 $s0,@X[($i+4)%8],@X[($i+5)%8],#8 @ X[i+9] - veor $s1,$t0 - vshr.u64 $d0,$e,#@Sigma1[0] @ from NEON_00_15 - vadd.i64 @X[$i%8],$s0 - vshr.u64 $d1,$e,#@Sigma1[1] @ from NEON_00_15 - veor $s1,$t1 @ sigma0(X[i+1]) - vshr.u64 $d2,$e,#@Sigma1[2] @ from NEON_00_15 - vadd.i64 @X[$i%8],$s1 -___ - &NEON_00_15(2*$i,@_); -} - -$code.=<<___; -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.global sha512_block_data_order_neon -.type sha512_block_data_order_neon,%function -.align 4 -sha512_block_data_order_neon: -.LNEON: - dmb @ errata #451034 on early Cortex A8 - add $len,$inp,$len,lsl#7 @ len to point at the end of inp - VFP_ABI_PUSH - adr $Ktbl,.Lsha512_block_data_order - sub $Ktbl,$Ktbl,.Lsha512_block_data_order-K512 - vldmia $ctx,{$A-$H} @ load context -.Loop_neon: -___ -for($i=0;$i<16;$i++) { &NEON_00_15($i,@V); unshift(@V,pop(@V)); } -$code.=<<___; - mov $cnt,#4 -.L16_79_neon: - subs $cnt,#1 -___ -for(;$i<32;$i++) { &NEON_16_79($i,@V); unshift(@V,pop(@V)); } -$code.=<<___; - bne .L16_79_neon - - vadd.i64 $A,d30 @ h+=Maj from the past - vldmia $ctx,{d24-d31} @ load context to temp - vadd.i64 q8,q12 @ vectorized accumulate - vadd.i64 q9,q13 - vadd.i64 q10,q14 - vadd.i64 q11,q15 - vstmia $ctx,{$A-$H} @ save context - teq $inp,$len - sub $Ktbl,#640 @ rewind K512 - bne .Loop_neon - - VFP_ABI_POP - ret @ bx lr -.size sha512_block_data_order_neon,.-sha512_block_data_order_neon -#endif -___ -} -$code.=<<___; -.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>" -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm OPENSSL_armcap_P,4,4 -#endif -___ - -$code =~ s/\`([^\`]*)\`/eval $1/gem; -$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 -$code =~ s/\bret\b/bx lr/gm; - -open SELF,$0; -while(<SELF>) { - next if (/^#!/); - last if (!s/^#/@/ and !/^$/); - print; -} -close SELF; - -print $code; -close STDOUT; # enforce flush diff --git 
a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c deleted file mode 100644 index f8a6480889b1..000000000000 --- a/arch/arm/crypto/sha512-glue.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * sha512-glue.c - accelerated SHA-384/512 for ARM - * - * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> - */ - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <crypto/internal/hash.h> -#include <crypto/sha2.h> -#include <crypto/sha512_base.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include "sha512.h" - -MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); - -MODULE_ALIAS_CRYPTO("sha384"); -MODULE_ALIAS_CRYPTO("sha512"); -MODULE_ALIAS_CRYPTO("sha384-arm"); -MODULE_ALIAS_CRYPTO("sha512-arm"); - -asmlinkage void sha512_block_data_order(struct sha512_state *state, - u8 const *src, int blocks); - -static int sha512_arm_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return sha512_base_do_update_blocks(desc, data, len, - sha512_block_data_order); -} - -static int sha512_arm_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha512_base_do_finup(desc, data, len, sha512_block_data_order); - return sha512_base_finish(desc, out); -} - -static struct shash_alg sha512_arm_algs[] = { { - .init = sha384_base_init, - .update = sha512_arm_update, - .finup = sha512_arm_finup, - .descsize = SHA512_STATE_SIZE, - .digestsize = SHA384_DIGEST_SIZE, - .base = { - .cra_name = "sha384", - .cra_driver_name = "sha384-arm", - .cra_priority = 250, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | - CRYPTO_AHASH_ALG_FINUP_MAX, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .init = sha512_base_init, - .update = sha512_arm_update, - .finup = sha512_arm_finup, - .descsize = SHA512_STATE_SIZE, - .digestsize = SHA512_DIGEST_SIZE, - .base = { - .cra_name = "sha512", - .cra_driver_name = "sha512-arm", - .cra_priority = 250, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | - CRYPTO_AHASH_ALG_FINUP_MAX, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha512_arm_mod_init(void) -{ - int err; - - err = crypto_register_shashes(sha512_arm_algs, - ARRAY_SIZE(sha512_arm_algs)); - if (err) - return err; - - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) { - err = crypto_register_shashes(sha512_neon_algs, - ARRAY_SIZE(sha512_neon_algs)); - if (err) - goto err_unregister; - } - return 0; - -err_unregister: - crypto_unregister_shashes(sha512_arm_algs, - ARRAY_SIZE(sha512_arm_algs)); - - return err; -} - -static void __exit sha512_arm_mod_fini(void) -{ - crypto_unregister_shashes(sha512_arm_algs, - ARRAY_SIZE(sha512_arm_algs)); - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) - crypto_unregister_shashes(sha512_neon_algs, - ARRAY_SIZE(sha512_neon_algs)); -} - -module_init(sha512_arm_mod_init); -module_exit(sha512_arm_mod_fini); diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c deleted file mode 100644 index bd528077fefb..000000000000 --- a/arch/arm/crypto/sha512-neon-glue.c +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * sha512-neon-glue.c - accelerated SHA-384/512 for ARM NEON - * - * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> - */ - -#include <asm/neon.h> -#include <crypto/internal/hash.h> -#include <crypto/sha2.h> -#include 
<crypto/sha512_base.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include "sha512.h" - -MODULE_ALIAS_CRYPTO("sha384-neon"); -MODULE_ALIAS_CRYPTO("sha512-neon"); - -asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, - const u8 *src, int blocks); - -static int sha512_neon_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - int remain; - - kernel_neon_begin(); - remain = sha512_base_do_update_blocks(desc, data, len, - sha512_block_data_order_neon); - kernel_neon_end(); - return remain; -} - -static int sha512_neon_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - kernel_neon_begin(); - sha512_base_do_finup(desc, data, len, sha512_block_data_order_neon); - kernel_neon_end(); - return sha512_base_finish(desc, out); -} - -struct shash_alg sha512_neon_algs[] = { { - .init = sha384_base_init, - .update = sha512_neon_update, - .finup = sha512_neon_finup, - .descsize = SHA512_STATE_SIZE, - .digestsize = SHA384_DIGEST_SIZE, - .base = { - .cra_name = "sha384", - .cra_driver_name = "sha384-neon", - .cra_priority = 300, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | - CRYPTO_AHASH_ALG_FINUP_MAX, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_module = THIS_MODULE, - - } -}, { - .init = sha512_base_init, - .update = sha512_neon_update, - .finup = sha512_neon_finup, - .descsize = SHA512_STATE_SIZE, - .digestsize = SHA512_DIGEST_SIZE, - .base = { - .cra_name = "sha512", - .cra_driver_name = "sha512-neon", - .cra_priority = 300, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | - CRYPTO_AHASH_ALG_FINUP_MAX, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; diff --git a/arch/arm/crypto/sha512.h b/arch/arm/crypto/sha512.h deleted file mode 100644 index eeaee52cda69..000000000000 --- a/arch/arm/crypto/sha512.h +++ /dev/null @@ -1,3 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -extern struct shash_alg sha512_neon_algs[2]; |
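
For reference, the rotation counts hard-coded in the deleted sha512-armv4.pl above (@Sigma0=(28,34,39), @Sigma1=(14,18,41), @sigma0=(1,8,7), @sigma1=(19,61,6)) are the standard FIPS 180-4 SHA-512 round and message-schedule functions. The portable C sketch below is an illustration only, not code from the kernel tree or the deleted files; the helper names (ror64, Sigma0, Ch, and so on) are chosen here for clarity. It shows what the eor/lsr/lsl and vshr/vsli sequences in the assembly compute:

/* Illustrative sketch of the SHA-512 round functions implemented by
 * the removed assembly (FIPS 180-4); not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* 64-bit rotate right; valid for 1 <= n <= 63 */
static inline uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* Big-sigma round functions: rotations 28,34,39 and 14,18,41 */
static inline uint64_t Sigma0(uint64_t a)
{
	return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
}

static inline uint64_t Sigma1(uint64_t e)
{
	return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
}

/* Small-sigma message-schedule functions:
 * sigma0(x) = ROTR(x,1) ^ ROTR(x,8) ^ (x >> 7)
 * sigma1(x) = ROTR(x,19) ^ ROTR(x,61) ^ (x >> 6)
 * exactly as the "@ sigma0(x)"/"@ sigma1(x)" comments in the
 * deleted .L16_79 loop describe. */
static inline uint64_t sigma0(uint64_t x)
{
	return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
}

static inline uint64_t sigma1(uint64_t x)
{
	return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
}

/* Bitwise choose and majority */
static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)
{
	return (e & f) ^ (~e & g);
}

static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c)
{
	return (a & b) ^ (a & c) ^ (b & c);
}

int main(void)
{
	/* Spot-check the schedule functions on an arbitrary word */
	uint64_t x = 0x0123456789abcdefULL;

	printf("sigma0 = %016llx\n", (unsigned long long)sigma0(x));
	printf("sigma1 = %016llx\n", (unsigned long long)sigma1(x));
	printf("Sigma0 = %016llx\n", (unsigned long long)Sigma0(x));
	printf("Sigma1 = %016llx\n", (unsigned long long)Sigma1(x));
	return 0;
}

Note how the deleted NEON path maps Ch and Maj onto single vbsl (bitwise select) instructions, per the "vbsl $Ch,$f,$g @ Ch(e,f,g)" and "vbsl $Maj,$c,$b @ Maj(a,b,c)" comments: with e as the select mask, (e & f) | (~e & g) is exactly Ch(e,f,g), so the ~e term never needs to be materialized separately.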