author		Maxime Ripard <maxime@cerno.tech>	2023-05-09 15:03:40 +0200
committer	Maxime Ripard <maxime@cerno.tech>	2023-05-09 15:03:40 +0200
commit		ff32fcca64437f679a2bf1c0a19d5def389a18e2 (patch)
tree		122863d5d6159b30fd6834cbe599f8ce1b9e8144 /arch/x86/crypto/aria-aesni-avx2-asm_64.S
parent		79c87edd18ec49f5b6fb40175bd1b1fea9398fdb (diff)
parent		ac9a78681b921877518763ba0e89202254349d1b (diff)
Merge drm/drm-next into drm-misc-next
Start the 6.5 release cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'arch/x86/crypto/aria-aesni-avx2-asm_64.S')
-rw-r--r--	arch/x86/crypto/aria-aesni-avx2-asm_64.S	28
1 file changed, 14 insertions, 14 deletions
diff --git a/arch/x86/crypto/aria-aesni-avx2-asm_64.S b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
index 82a14b4ad920..c60fa2980630 100644
--- a/arch/x86/crypto/aria-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
@@ -96,7 +96,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti128 .Lshufb_16x16b, a0; \
+ vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -148,7 +148,7 @@
transpose_4x4(c0, c1, c2, c3, a0, a1); \
transpose_4x4(d0, d1, d2, d3, a0, a1); \
\
- vbroadcasti128 .Lshufb_16x16b, a0; \
+ vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
vmovdqu st1, a1; \
vpshufb a0, a2, a2; \
vpshufb a0, a3, a3; \
@@ -307,11 +307,11 @@
x4, x5, x6, x7, \
t0, t1, t2, t3, \
t4, t5, t6, t7) \
- vpbroadcastq .Ltf_s2_bitmatrix, t0; \
- vpbroadcastq .Ltf_inv_bitmatrix, t1; \
- vpbroadcastq .Ltf_id_bitmatrix, t2; \
- vpbroadcastq .Ltf_aff_bitmatrix, t3; \
- vpbroadcastq .Ltf_x2_bitmatrix, t4; \
+ vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0; \
+ vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1; \
+ vpbroadcastq .Ltf_id_bitmatrix(%rip), t2; \
+ vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3; \
+ vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4; \
vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
@@ -332,12 +332,12 @@
t4, t5, t6, t7) \
vpxor t7, t7, t7; \
vpxor t6, t6, t6; \
- vbroadcasti128 .Linv_shift_row, t0; \
- vbroadcasti128 .Lshift_row, t1; \
- vbroadcasti128 .Ltf_lo__inv_aff__and__s2, t2; \
- vbroadcasti128 .Ltf_hi__inv_aff__and__s2, t3; \
- vbroadcasti128 .Ltf_lo__x2__and__fwd_aff, t4; \
- vbroadcasti128 .Ltf_hi__x2__and__fwd_aff, t5; \
+ vbroadcasti128 .Linv_shift_row(%rip), t0; \
+ vbroadcasti128 .Lshift_row(%rip), t1; \
+ vbroadcasti128 .Ltf_lo__inv_aff__and__s2(%rip), t2; \
+ vbroadcasti128 .Ltf_hi__inv_aff__and__s2(%rip), t3; \
+ vbroadcasti128 .Ltf_lo__x2__and__fwd_aff(%rip), t4; \
+ vbroadcasti128 .Ltf_hi__x2__and__fwd_aff(%rip), t5; \
\
vextracti128 $1, x0, t6##_x; \
vaesenclast t7##_x, x0##_x, x0##_x; \
@@ -369,7 +369,7 @@
vaesdeclast t7##_x, t6##_x, t6##_x; \
vinserti128 $1, t6##_x, x6, x6; \
\
- vpbroadcastd .L0f0f0f0f, t6; \
+ vpbroadcastd .L0f0f0f0f(%rip), t6; \
\
/* AES inverse shift rows */ \
vpshufb t0, x0, x0; \
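Every hunk in this patch makes the same change: loads of in-file constants that used a bare symbol (an absolute 32-bit displacement fixed at link time) are switched to addressing the symbol relative to %rip, so the code keeps working wherever the image is loaded. Below is a minimal sketch of the two forms in GNU as syntax; the constant .Lconst, its value, and the register %ymm0 are illustrative only and not taken from the patch.

	.section .rodata.cst16, "aM", @progbits, 16
	.align 16
.Lconst:	/* hypothetical 16-byte constant, for illustration only */
	.octa	0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f

	.text
	/* Absolute form: the assembler emits a 32-bit absolute displacement,
	 * which only resolves if the symbol ends up at a fixed, low address. */
	vbroadcasti128	.Lconst, %ymm0

	/* RIP-relative form (what this patch switches to): the operand is
	 * encoded as an offset from the next instruction, so the load is
	 * valid at any load address. */
	vbroadcasti128	.Lconst(%rip), %ymm0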