author     Linus Torvalds <torvalds@linux-foundation.org>  2020-04-11 09:46:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-11 09:46:12 -0700
commit     b753101a4ac0b906064a72feec43f5b80a1fe2e5 (patch)
tree       90f8f851a5de81d17f2154526a13d91f07075bad /arch/x86/crypto/aesni-intel_glue.c
parent     c7850ae4d71c48ab352bb4245bc24f7e984fe721 (diff)
parent     00d76a0c19436c88ffcafa1e816f0c0c9af082a0 (diff)
Merge tag 'kbuild-v5.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild
Pull more Kbuild updates from Masahiro Yamada:

 - raise minimum supported binutils version to 2.23

 - remove old CONFIG_AS_* macros that we know binutils >= 2.23 supports

 - move remaining CONFIG_AS_* tests to Kconfig from Makefile

 - enable -Wtautological-compare warnings to catch more issues

 - do not support GCC plugins for GCC <= 4.7

 - fix various breakages of 'make xconfig'

 - include the linker version used for linking the kernel into
   LINUX_COMPILER, which is used for the banner, and also exposed to
   /proc/version

 - link lib-y objects to vmlinux forcibly when CONFIG_MODULES=y, which
   allows us to remove the lib-ksyms.o workaround, and to solve the
   last known issue of the LLVM linker

 - add dummy tools in scripts/dummy-tools/ to enable all compiler tests
   in Kconfig, which will be useful for distro maintainers

 - support the single switch, LLVM=1 to use Clang and all LLVM
   utilities instead of GCC and Binutils

 - support LLVM_IAS=1 to enable the integrated assembler, which is
   still experimental

* tag 'kbuild-v5.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild: (36 commits)
  kbuild: fix comment about missing include guard detection
  kbuild: support LLVM=1 to switch the default tools to Clang/LLVM
  kbuild: replace AS=clang with LLVM_IAS=1
  kbuild: add dummy toolchains to enable all cc-option etc. in Kconfig
  kbuild: link lib-y objects to vmlinux forcibly when CONFIG_MODULES=y
  MIPS: fw: arc: add __weak to prom_meminit and prom_free_prom_memory
  kbuild: remove -I$(srctree)/tools/include from scripts/Makefile
  kbuild: do not pass $(KBUILD_CFLAGS) to scripts/mkcompile_h
  Documentation/llvm: fix the name of llvm-size
  kbuild: mkcompile_h: Include $LD version in /proc/version
  kconfig: qconf: Fix a few alignment issues
  kconfig: qconf: remove some old bogus TODOs
  kconfig: qconf: fix support for the split view mode
  kconfig: qconf: fix the content of the main widget
  kconfig: qconf: Change title for the item window
  kconfig: qconf: clean deprecated warnings
  gcc-plugins: drop support for GCC <= 4.7
  kbuild: Enable -Wtautological-compare
  x86: update AS_* macros to binutils >=2.23, supporting ADX and AVX2
  crypto: x86 - clean up poly1305-x86_64-cryptogams.S by 'make clean'
  ...
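The x86/crypto changes in this diff all follow one pattern: with binutils >= 2.23 as the minimum, the AVX and AVX2 assembly can always be assembled, so the compile-time #ifdef CONFIG_AS_AVX / CONFIG_AS_AVX2 guards are dropped and only the existing runtime CPU-feature dispatch remains. A minimal userspace sketch of that pattern, using GCC/Clang's __builtin_cpu_supports() as a stand-in for the kernel's boot_cpu_has() (the gcm_* names below are illustrative, not from the patch):

#include <stdio.h>

/* Stand-ins for the SSE/AVX/AVX2 GCM routines; in the kernel these are
 * assembly implementations that are now always built. */
static void gcm_sse(void)  { puts("SSE version of gcm_enc/dec engaged"); }
static void gcm_avx(void)  { puts("AVX version of gcm_enc/dec engaged"); }
static void gcm_avx2(void) { puts("AVX2 version of gcm_enc/dec engaged"); }

static void (*gcm_tfm)(void);

int main(void)
{
	/* Runtime dispatch replaces the removed #ifdef blocks: every
	 * variant is compiled in, and the widest one the CPU supports
	 * is selected once at init, as aesni_init() does below. */
	if (__builtin_cpu_supports("avx2"))
		gcm_tfm = gcm_avx2;
	else if (__builtin_cpu_supports("avx"))
		gcm_tfm = gcm_avx;
	else
		gcm_tfm = gcm_sse;

	gcm_tfm();
	return 0;
}

The trade-off is deliberate: building every variant unconditionally costs a little object size, but it removes an entire axis from the build matrix, which is exactly what the binutils 2.23 floor makes safe.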
Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  21
1 file changed, 1 insertion, 20 deletions
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 75b6ea20491e..ad8a7188a2bf 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -185,7 +185,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
.finalize = &aesni_gcm_finalize,
};
-#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
@@ -234,9 +233,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
.finalize = &aesni_gcm_finalize_avx_gen2,
};
-#endif
-
-#ifdef CONFIG_AS_AVX2
/*
* asmlinkage void aesni_gcm_init_avx_gen4()
* gcm_data *my_ctx_data, context data
@@ -279,8 +275,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
.finalize = &aesni_gcm_finalize_avx_gen4,
};
-#endif
-
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
@@ -476,7 +470,6 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
-#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv)
{
@@ -493,7 +486,6 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
else
aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
-#endif
static int ctr_crypt(struct skcipher_request *req)
{
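For reference, the body of aesni_ctr_enc_avx_tfm() (only partially visible in the hunk above) simply dispatches on the expanded key length to the by8 CTR routines declared earlier. A compilable sketch of that selection, with the routine bodies stubbed out and the standard AES key sizes assumed (16 and 24 bytes):

#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24

struct aes_ctx { unsigned int key_length; };

/* Stubs for aes_ctr_enc_{128,192,256}_avx_by8. */
static void ctr_by8_128(void) { puts("by8 CTR, 128-bit key"); }
static void ctr_by8_192(void) { puts("by8 CTR, 192-bit key"); }
static void ctr_by8_256(void) { puts("by8 CTR, 256-bit key"); }

/* Mirrors the if/else chain of aesni_ctr_enc_avx_tfm(). */
static void ctr_enc_avx(const struct aes_ctx *ctx)
{
	if (ctx->key_length == AES_KEYSIZE_128)
		ctr_by8_128();
	else if (ctx->key_length == AES_KEYSIZE_192)
		ctr_by8_192();
	else
		ctr_by8_256();
}

int main(void)
{
	struct aes_ctx ctx = { .key_length = AES_KEYSIZE_192 };
	ctr_enc_avx(&ctx);
	return 0;
}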
@@ -711,14 +703,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
if (!enc)
left -= auth_tag_len;
-#ifdef CONFIG_AS_AVX2
if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
gcm_tfm = &aesni_gcm_tfm_avx_gen2;
-#endif
-#ifdef CONFIG_AS_AVX
if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
gcm_tfm = &aesni_gcm_tfm_sse;
-#endif
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length &&
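The two checks kept above implement a short-input fallback: when there is not enough data to amortize a wider implementation's setup cost, the boot-time choice is stepped down one level at a time, and with the #ifdefs gone both checks are always compiled. A sketch of that logic, with the AVX_GEN*_OPTSIZE thresholds assumed at the values this file defines (640 and 4096 bytes):

#include <stddef.h>
#include <stdio.h>

/* Assumed threshold values from aesni-intel_glue.c. */
#define AVX_GEN2_OPTSIZE  640
#define AVX_GEN4_OPTSIZE 4096

enum gcm_impl { GCM_SSE, GCM_AVX_GEN2, GCM_AVX_GEN4 };

/* Downgrade the boot-time choice for short requests, one level per
 * check, as gcmaes_crypt_by_sg() does; a very short request can fall
 * through both checks, from AVX_GEN4 all the way to SSE. */
static enum gcm_impl pick_gcm_impl(enum gcm_impl impl, size_t left)
{
	if (left < AVX_GEN4_OPTSIZE && impl == GCM_AVX_GEN4)
		impl = GCM_AVX_GEN2;
	if (left < AVX_GEN2_OPTSIZE && impl == GCM_AVX_GEN2)
		impl = GCM_SSE;
	return impl;
}

int main(void)
{
	/* A 256-byte request falls all the way back to SSE (prints 0). */
	printf("%d\n", pick_gcm_impl(GCM_AVX_GEN4, 256));
	return 0;
}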
@@ -1076,31 +1064,24 @@ static int __init aesni_init(void)
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
#ifdef CONFIG_X86_64
-#ifdef CONFIG_AS_AVX2
if (boot_cpu_has(X86_FEATURE_AVX2)) {
pr_info("AVX2 version of gcm_enc/dec engaged.\n");
aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
} else
-#endif
-#ifdef CONFIG_AS_AVX
if (boot_cpu_has(X86_FEATURE_AVX)) {
pr_info("AVX version of gcm_enc/dec engaged.\n");
aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
- } else
-#endif
- {
+ } else {
pr_info("SSE version of gcm_enc/dec engaged.\n");
aesni_gcm_tfm = &aesni_gcm_tfm_sse;
}
aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
if (boot_cpu_has(X86_FEATURE_AVX)) {
/* optimize performance of ctr mode encryption transform */
aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
pr_info("AES CTR mode by8 optimization enabled\n");
}
#endif
-#endif
err = crypto_register_alg(&aesni_cipher_alg);
if (err)