34 files changed, 55 insertions, 239 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9d034a987c6e..fa3b616af03a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -412,10 +412,6 @@ config HAVE_INTEL_TXT
 	def_bool y
 	depends on INTEL_IOMMU && ACPI
 
-config X86_64_SMP
-	def_bool y
-	depends on X86_64 && SMP
-
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
index c827f694fb72..b1c59fb0a4c9 100644
--- a/arch/x86/Kconfig.assembler
+++ b/arch/x86/Kconfig.assembler
@@ -1,26 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 
-config AS_AVX512
-	def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5)
-	help
-	  Supported by binutils >= 2.25 and LLVM integrated assembler
-
-config AS_GFNI
-	def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2)
-	help
-	  Supported by binutils >= 2.30 and LLVM integrated assembler
-
-config AS_VAES
-	def_bool $(as-instr,vaesenc %ymm0$(comma)%ymm1$(comma)%ymm2)
-	help
-	  Supported by binutils >= 2.30 and LLVM integrated assembler
-
-config AS_VPCLMULQDQ
-	def_bool $(as-instr,vpclmulqdq \$0x10$(comma)%ymm0$(comma)%ymm1$(comma)%ymm2)
-	help
-	  Supported by binutils >= 2.30 and LLVM integrated assembler
-
 config AS_WRUSS
 	def_bool $(as-instr64,wrussq %rax$(comma)(%rbx))
 	help
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 8518ae214c9b..79e15971529d 100644
--- a/arch/x86/boot/bitops.h
+++ b/arch/x86/boot/bitops.h
@@ -27,7 +27,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
 	bool v;
 	const u32 *p = addr;
 
-	asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
+	asm("btl %2,%1" : "=@ccc" (v) : "m" (*p), "Ir" (nr));
 	return v;
 }
 
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 60580836daf7..a3c58ebe3662 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -155,15 +155,15 @@ static inline void wrgs32(u32 v, addr_t addr)
 static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
 {
 	bool diff;
-	asm volatile("fs repe cmpsb" CC_SET(nz)
-		     : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	asm volatile("fs repe cmpsb"
+		     : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len));
 	return diff;
 }
 
 static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
 {
 	bool diff;
-	asm volatile("gs repe cmpsb" CC_SET(nz)
-		     : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	asm volatile("gs repe cmpsb"
+		     : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len));
 	return diff;
 }
 
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index f35369bb14c5..b25c6a9303b7 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -32,8 +32,8 @@ int memcmp(const void *s1, const void *s2, size_t len)
 {
 	bool diff;
 
-	asm("repe cmpsb" CC_SET(nz)
-	    : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	asm("repe cmpsb"
+	    : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len));
 	return diff;
 }
 
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index d9c6fc78cf33..48d3076b6053 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -306,7 +306,7 @@ config CRYPTO_ARIA_AESNI_AVX2_X86_64
 
 config CRYPTO_ARIA_GFNI_AVX512_X86_64
 	tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)"
-	depends on 64BIT && AS_GFNI
+	depends on 64BIT
 	select CRYPTO_SKCIPHER
 	select CRYPTO_ALGAPI
 	select CRYPTO_ARIA
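The Kconfig.assembler removals above all follow from raising the kernel's minimum binutils version: the $(as-instr,...) probes for the AVX512, GFNI, VAES and VPCLMULQDQ mnemonics now always succeed, so the AS_* symbols and every ifdef keyed on them are dead weight, and the choice of implementation becomes a pure runtime CPUID decision. A minimal sketch of that runtime-dispatch pattern, with hypothetical names (cpu_has_gfni() and the cipher_* functions stand in for boot_cpu_has(X86_FEATURE_GFNI) and the real glue code):

	#include <stdbool.h>

	/* Hypothetical stand-ins for boot_cpu_has() and the asm implementations. */
	extern bool cpu_has_gfni(void);
	extern void cipher_16way_aesni(const void *ctx, unsigned char *dst,
				       const unsigned char *src);
	extern void cipher_16way_gfni(const void *ctx, unsigned char *dst,
				      const unsigned char *src);

	typedef void (*cipher_fn_t)(const void *, unsigned char *,
				    const unsigned char *);

	static cipher_fn_t pick_cipher(void)
	{
		/* Old form: cpu_has_gfni() && IS_ENABLED(CONFIG_AS_GFNI). */
		if (cpu_has_gfni())
			return cipher_16way_gfni;
		return cipher_16way_aesni;
	}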
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index dfba7e5e88ea..2d30d5d36145 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -46,10 +46,8 @@ obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
 aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \
 			       aes-gcm-aesni-x86_64.o \
-			       aes-xts-avx-x86_64.o
-ifeq ($(CONFIG_AS_VAES)$(CONFIG_AS_VPCLMULQDQ),yy)
-aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o
-endif
+			       aes-xts-avx-x86_64.o \
+			       aes-gcm-avx10-x86_64.o
 
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
diff --git a/arch/x86/crypto/aes-ctr-avx-x86_64.S b/arch/x86/crypto/aes-ctr-avx-x86_64.S
index bbbfd80f5a50..2745918f68ee 100644
--- a/arch/x86/crypto/aes-ctr-avx-x86_64.S
+++ b/arch/x86/crypto/aes-ctr-avx-x86_64.S
@@ -552,7 +552,6 @@ SYM_TYPED_FUNC_START(aes_xctr_crypt_aesni_avx)
 	_aes_ctr_crypt	1
 SYM_FUNC_END(aes_xctr_crypt_aesni_avx)
 
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 .set	VL, 32
 .set	USE_AVX512, 0
 SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2)
@@ -570,4 +569,3 @@ SYM_FUNC_END(aes_ctr64_crypt_vaes_avx512)
 SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512)
 	_aes_ctr_crypt	1
 SYM_FUNC_END(aes_xctr_crypt_vaes_avx512)
-#endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ
diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S
index db79cdf81588..a30753a3e207 100644
--- a/arch/x86/crypto/aes-xts-avx-x86_64.S
+++ b/arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -886,7 +886,6 @@ SYM_TYPED_FUNC_START(aes_xts_decrypt_aesni_avx)
 	_aes_xts_crypt	0
 SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
 
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 .set	VL, 32
 .set	USE_AVX512, 0
 SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)
@@ -904,4 +903,3 @@ SYM_FUNC_END(aes_xts_encrypt_vaes_avx512)
 SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512)
 	_aes_xts_crypt	0
 SYM_FUNC_END(aes_xts_decrypt_vaes_avx512)
-#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 061b1ced93c5..d953ac470aae 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -828,10 +828,8 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{	\
 }}
 
 DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500);
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600);
 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
-#endif
 
 /* The common part of the x86_64 AES-GCM key struct */
 struct aes_gcm_key {
@@ -912,17 +910,8 @@ struct aes_gcm_key_avx10 {
 #define FLAG_RFC4106	BIT(0)
 #define FLAG_ENC	BIT(1)
 #define FLAG_AVX	BIT(2)
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
-# define FLAG_AVX10_256	BIT(3)
-# define FLAG_AVX10_512	BIT(4)
-#else
-	/*
-	 * This should cause all calls to the AVX10 assembly functions to be
-	 * optimized out, avoiding the need to ifdef each call individually.
-	 */
-# define FLAG_AVX10_256	0
-# define FLAG_AVX10_512	0
-#endif
+#define FLAG_AVX10_256	BIT(3)
+#define FLAG_AVX10_512	BIT(4)
 
 static inline struct aes_gcm_key *
 aes_gcm_key_get(struct crypto_aead *tfm, int flags)
@@ -1519,7 +1508,6 @@ DEFINE_GCM_ALGS(aesni_avx, FLAG_AVX,
 		"generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx",
 		AES_GCM_KEY_AESNI_SIZE, 500);
 
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 /* aes_gcm_algs_vaes_avx10_256 */
 DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
 		"generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256",
@@ -1529,7 +1517,6 @@ DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
 DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512,
 		"generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512",
 		AES_GCM_KEY_AVX10_SIZE, 800);
-#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
 
 static int __init register_avx_algs(void)
 {
@@ -1551,7 +1538,6 @@ static int __init register_avx_algs(void)
 	 * Similarly, the assembler support was added at about the same time.
 	 * For simplicity, just always check for VAES and VPCLMULQDQ together.
 	 */
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
 	    !boot_cpu_has(X86_FEATURE_VAES) ||
 	    !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
@@ -1592,7 +1578,7 @@ static int __init register_avx_algs(void)
 			      ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
 	if (err)
 		return err;
-#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
+
 	return 0;
 }
 
@@ -1607,12 +1593,10 @@ static void unregister_avx_algs(void)
 {
 	unregister_skciphers(skcipher_algs_aesni_avx);
 	unregister_aeads(aes_gcm_algs_aesni_avx);
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 	unregister_skciphers(skcipher_algs_vaes_avx2);
 	unregister_skciphers(skcipher_algs_vaes_avx512);
 	unregister_aeads(aes_gcm_algs_vaes_avx10_256);
 	unregister_aeads(aes_gcm_algs_vaes_avx10_512);
-#endif
 }
 #else /* CONFIG_X86_64 */
 static struct aead_alg aes_gcm_algs_aesni[0];
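With the compile-time guard gone, FLAG_AVX10_256 and FLAG_AVX10_512 are always real bits: nothing is folded away at build time, and the registration path alone decides at runtime whether the AVX10 algorithms exist. A hedged sketch of that flag-dispatch idea, with illustrative names rather than the kernel's exact layout:

	#include <stdio.h>

	#define BIT(n)		(1u << (n))
	#define FLAG_RFC4106	BIT(0)
	#define FLAG_ENC	BIT(1)
	#define FLAG_AVX	BIT(2)
	#define FLAG_AVX10_256	BIT(3)
	#define FLAG_AVX10_512	BIT(4)

	/* Illustrative only: choose a key layout from the flags word. */
	static const char *key_layout(unsigned int flags)
	{
		if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
			return "aes_gcm_key_avx10";
		return "aes_gcm_key_aesni";
	}

	int main(void)
	{
		printf("%s\n", key_layout(FLAG_ENC | FLAG_AVX10_512));
		return 0;
	}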
diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
index 9556dacd9841..932fb17308e7 100644
--- a/arch/x86/crypto/aria-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
@@ -295,7 +295,6 @@
 	vpshufb t1, t0, t2;		\
 	vpxor t2, x7, x7;
 
-#ifdef CONFIG_AS_GFNI
 #define aria_sbox_8way_gfni(x0, x1, x2, x3,	\
 			    x4, x5, x6, x7,	\
 			    t0, t1, t2, t3,	\
@@ -318,8 +317,6 @@
 	vgf2p8affineinvqb $0, t2, x3, x3;	\
 	vgf2p8affineinvqb $0, t2, x7, x7
 
-#endif /* CONFIG_AS_GFNI */
-
 #define aria_sbox_8way(x0, x1, x2, x3,		\
 		       x4, x5, x6, x7,		\
 		       t0, t1, t2, t3,		\
@@ -561,7 +558,6 @@
 			    y4, y5, y6, y7,	\
 			    mem_tmp, 8);
 
-#ifdef CONFIG_AS_GFNI
 #define aria_fe_gfni(x0, x1, x2, x3,		\
 		     x4, x5, x6, x7,		\
 		     y0, y1, y2, y3,		\
@@ -719,8 +715,6 @@
 			    y4, y5, y6, y7,	\
 			    mem_tmp, 8);
 
-#endif /* CONFIG_AS_GFNI */
-
 /* NB: section is mergeable, all elements must be aligned 16-byte blocks */
 .section	.rodata.cst16, "aM", @progbits, 16
 .align 16
@@ -772,7 +766,6 @@
 .Ltf_hi__x2__and__fwd_aff:
 	.octa 0x3F893781E95FE1576CDA64D2BA0CB204
 
-#ifdef CONFIG_AS_GFNI
 /* AES affine: */
 #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0)
 .Ltf_aff_bitmatrix:
@@ -871,7 +864,6 @@
 		    BV8(0, 0, 0, 0, 0, 1, 0, 0),
 		    BV8(0, 0, 0, 0, 0, 0, 1, 0),
 		    BV8(0, 0, 0, 0, 0, 0, 0, 1))
-#endif /* CONFIG_AS_GFNI */
 
 /* 4-bit mask */
 .section	.rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
@@ -1140,7 +1132,6 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
 	RET;
 SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way)
 
-#ifdef CONFIG_AS_GFNI
 SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
 	/* input:
 	 *      %r9: rk
@@ -1359,4 +1350,3 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
 	FRAME_END
 	RET;
 SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way)
-#endif /* CONFIG_AS_GFNI */
diff --git a/arch/x86/crypto/aria-aesni-avx2-asm_64.S b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
index c60fa2980630..ed53d4f46bd7 100644
--- a/arch/x86/crypto/aria-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
@@ -302,7 +302,6 @@
 	vpbroadcastb ((round * 16) + idx + 4)(rk), t0;	\
 	vpxor t0, x7, x7;
 
-#ifdef CONFIG_AS_GFNI
 #define aria_sbox_8way_gfni(x0, x1, x2, x3,	\
 			    x4, x5, x6, x7,	\
 			    t0, t1, t2, t3,	\
@@ -325,7 +324,6 @@
 	vgf2p8affineinvqb $0, t2, x3, x3;	\
 	vgf2p8affineinvqb $0, t2, x7, x7
 
-#endif /* CONFIG_AS_GFNI */
 #define aria_sbox_8way(x0, x1, x2, x3,		\
 		       x4, x5, x6, x7,		\
 		       t0, t1, t2, t3,		\
@@ -598,7 +596,7 @@
 	aria_load_state_8way(y0, y1, y2, y3,	\
 			     y4, y5, y6, y7,	\
 			     mem_tmp, 8);
-#ifdef CONFIG_AS_GFNI
+
 #define aria_fe_gfni(x0, x1, x2, x3,		\
 		     x4, x5, x6, x7,		\
 		     y0, y1, y2, y3,		\
@@ -752,7 +750,6 @@
 	aria_load_state_8way(y0, y1, y2, y3,	\
 			     y4, y5, y6, y7,	\
 			     mem_tmp, 8);
-#endif /* CONFIG_AS_GFNI */
 
 .section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32
 .align 32
@@ -806,7 +803,6 @@
 .Ltf_hi__x2__and__fwd_aff:
 	.octa 0x3F893781E95FE1576CDA64D2BA0CB204
 
-#ifdef CONFIG_AS_GFNI
 .section	.rodata.cst8, "aM", @progbits, 8
 .align 8
 /* AES affine: */
@@ -868,8 +864,6 @@
 		    BV8(0, 0, 0, 0, 0, 0, 1, 0),
 		    BV8(0, 0, 0, 0, 0, 0, 0, 1))
 
-#endif /* CONFIG_AS_GFNI */
-
 /* 4-bit mask */
 .section	.rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
 .align 4
@@ -1219,7 +1213,6 @@ SYM_TYPED_FUNC_START(aria_aesni_avx2_ctr_crypt_32way)
 	RET;
 SYM_FUNC_END(aria_aesni_avx2_ctr_crypt_32way)
 
-#ifdef CONFIG_AS_GFNI
 SYM_FUNC_START_LOCAL(__aria_aesni_avx2_gfni_crypt_32way)
 	/* input:
 	 *      %r9: rk
@@ -1438,4 +1431,3 @@ SYM_TYPED_FUNC_START(aria_aesni_avx2_gfni_ctr_crypt_32way)
 	FRAME_END
 	RET;
 SYM_FUNC_END(aria_aesni_avx2_gfni_ctr_crypt_32way)
-#endif /* CONFIG_AS_GFNI */
diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c
index 007b250f774c..1487a49bfbac 100644
--- a/arch/x86/crypto/aria_aesni_avx2_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx2_glue.c
@@ -26,7 +26,6 @@ asmlinkage void aria_aesni_avx2_ctr_crypt_32way(const void *ctx, u8 *dst,
 						const u8 *src,
 						u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_ctr_crypt_32way);
-#ifdef CONFIG_AS_GFNI
 asmlinkage void aria_aesni_avx2_gfni_encrypt_32way(const void *ctx, u8 *dst,
 						   const u8 *src);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_encrypt_32way);
@@ -37,7 +36,6 @@ asmlinkage void aria_aesni_avx2_gfni_ctr_crypt_32way(const void *ctx, u8 *dst,
 						     const u8 *src,
 						     u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_ctr_crypt_32way);
-#endif /* CONFIG_AS_GFNI */
 
 static struct aria_avx_ops aria_ops;
 
@@ -213,7 +211,7 @@ static int __init aria_avx2_init(void)
 		return -ENODEV;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
+	if (boot_cpu_has(X86_FEATURE_GFNI)) {
 		aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
 		aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
 		aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index 4c88ef4eba82..e4e3d78915a5 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -26,7 +26,6 @@ asmlinkage void aria_aesni_avx_ctr_crypt_16way(const void *ctx, u8 *dst,
 					       const u8 *src,
 					       u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx_ctr_crypt_16way);
-#ifdef CONFIG_AS_GFNI
 asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst,
 						  const u8 *src);
 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_encrypt_16way);
@@ -37,7 +36,6 @@ asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst,
 						    const u8 *src,
 						    u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_ctr_crypt_16way);
-#endif /* CONFIG_AS_GFNI */
 
 static struct aria_avx_ops aria_ops;
 
@@ -199,7 +197,7 @@ static int __init aria_avx_init(void)
 		return -ENODEV;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
+	if (boot_cpu_has(X86_FEATURE_GFNI)) {
 		aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
 		aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
 		aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
diff --git a/arch/x86/entry/entry_64_fred.S b/arch/x86/entry/entry_64_fred.S
index 907bd233c6c1..3c1511feadf7 100644
--- a/arch/x86/entry/entry_64_fred.S
+++ b/arch/x86/entry/entry_64_fred.S
@@ -97,8 +97,7 @@ SYM_FUNC_START(asm_fred_entry_from_kvm)
 	push %rdi				/* fred_ss handed in by the caller */
 	push %rbp
 	pushf
-	mov $__KERNEL_CS, %rax
-	push %rax
+	push $__KERNEL_CS
 
 	/*
 	 * Unlike the IDT event delivery, FRED _always_ pushes an error code
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 02bae8e0758b..4c305305871b 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -23,8 +23,7 @@ static inline bool __must_check rdrand_long(unsigned long *v)
 	unsigned int retry = RDRAND_RETRY_LOOPS;
 	do {
 		asm volatile("rdrand %[out]"
-			     CC_SET(c)
-			     : CC_OUT(c) (ok), [out] "=r" (*v));
+			     : "=@ccc" (ok), [out] "=r" (*v));
 		if (ok)
 			return true;
 	} while (--retry);
@@ -35,8 +34,7 @@ static inline bool __must_check rdseed_long(unsigned long *v)
 {
 	bool ok;
 	asm volatile("rdseed %[out]"
-		     CC_SET(c)
-		     : CC_OUT(c) (ok), [out] "=r" (*v));
+		     : "=@ccc" (ok), [out] "=r" (*v));
 	return ok;
 }
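The rdrand/rdseed hunks above read the carry flag straight through a "=@ccc" output; CF=1 signals that the DRNG returned a valid value. A self-contained sketch of the same retry loop (x86-64 GCC/Clang only; RDRAND_RETRY_LOOPS is 10 in the kernel):

	#include <stdbool.h>

	#define RDRAND_RETRY_LOOPS 10

	static inline bool rdrand_u64(unsigned long long *v)
	{
		unsigned int retry = RDRAND_RETRY_LOOPS;
		bool ok;

		do {
			/* CF=1 means the DRNG returned a valid value. */
			asm volatile("rdrand %[out]"
				     : "=@ccc" (ok), [out] "=r" (*v));
			if (ok)
				return true;
		} while (--retry);

		return false;
	}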
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index f963848024a5..d5c8d3afe196 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -122,18 +122,6 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
 }
 #endif
 
-/*
- * Macros to generate condition code outputs from inline assembly,
- * The output operand must be type "bool".
- */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
-# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
-# define CC_OUT(c) "=@cc" #c
-#else
-# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
-# define CC_OUT(c) [_cc_ ## c] "=qm"
-#endif
-
 #ifdef __KERNEL__
 # include <asm/extable_fixup_types.h>
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index a835f891164d..c2ce213f2b9b 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -99,8 +99,7 @@ static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 {
 	bool negative;
 	asm_inline volatile(LOCK_PREFIX "xorb %2,%1"
-		CC_SET(s)
-		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
+		: "=@ccs" (negative), WBYTE_ADDR(addr)
 		: "iq" ((char)mask) : "memory");
 	return negative;
 }
@@ -149,8 +148,7 @@ arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 	bool oldbit;
 
 	asm(__ASM_SIZE(bts) " %2,%1"
-	    CC_SET(c)
-	    : CC_OUT(c) (oldbit)
+	    : "=@ccc" (oldbit)
 	    : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -175,8 +173,7 @@ arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 	bool oldbit;
 
 	asm volatile(__ASM_SIZE(btr) " %2,%1"
-		     CC_SET(c)
-		     : CC_OUT(c) (oldbit)
+		     : "=@ccc" (oldbit)
 		     : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -187,8 +184,7 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 	bool oldbit;
 
 	asm volatile(__ASM_SIZE(btc) " %2,%1"
-		     CC_SET(c)
-		     : CC_OUT(c) (oldbit)
+		     : "=@ccc" (oldbit)
 		     : ADDR, "Ir" (nr) : "memory");
 
 	return oldbit;
@@ -211,8 +207,7 @@ static __always_inline bool constant_test_bit_acquire(long nr, const volatile un
 	bool oldbit;
 
 	asm volatile("testb %2,%1"
-		     CC_SET(nz)
-		     : CC_OUT(nz) (oldbit)
+		     : "=@ccnz" (oldbit)
 		     : "m" (((unsigned char *)addr)[nr >> 3]),
 		       "i" (1 << (nr & 7))
 		     :"memory");
@@ -225,8 +220,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 	bool oldbit;
 
 	asm volatile(__ASM_SIZE(bt) " %2,%1"
-		     CC_SET(c)
-		     : CC_OUT(c) (oldbit)
+		     : "=@ccc" (oldbit)
 		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 
 	return oldbit;
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b61f32c3459f..a88b06f1c35e 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -166,8 +166,7 @@ extern void __add_wrong_size(void)
 	{								\
 		volatile u8 *__ptr = (volatile u8 *)(_ptr);		\
 		asm_inline volatile(lock "cmpxchgb %[new], %[ptr]"	\
-			     CC_SET(z)					\
-			     : CC_OUT(z) (success),			\
+			     : "=@ccz" (success),			\
 			       [ptr] "+m" (*__ptr),			\
 			       [old] "+a" (__old)			\
 			     : [new] "q" (__new)			\
@@ -178,8 +177,7 @@ extern void __add_wrong_size(void)
 	{								\
 		volatile u16 *__ptr = (volatile u16 *)(_ptr);		\
 		asm_inline volatile(lock "cmpxchgw %[new], %[ptr]"	\
-			     CC_SET(z)					\
-			     : CC_OUT(z) (success),			\
+			     : "=@ccz" (success),			\
 			       [ptr] "+m" (*__ptr),			\
 			       [old] "+a" (__old)			\
 			     : [new] "r" (__new)			\
@@ -190,8 +188,7 @@ extern void __add_wrong_size(void)
 	{								\
 		volatile u32 *__ptr = (volatile u32 *)(_ptr);		\
 		asm_inline volatile(lock "cmpxchgl %[new], %[ptr]"	\
-			     CC_SET(z)					\
-			     : CC_OUT(z) (success),			\
+			     : "=@ccz" (success),			\
 			       [ptr] "+m" (*__ptr),			\
 			       [old] "+a" (__old)			\
 			     : [new] "r" (__new)			\
@@ -202,8 +199,7 @@ extern void __add_wrong_size(void)
 	{								\
 		volatile u64 *__ptr = (volatile u64 *)(_ptr);		\
 		asm_inline volatile(lock "cmpxchgq %[new], %[ptr]"	\
-			     CC_SET(z)					\
-			     : CC_OUT(z) (success),			\
+			     : "=@ccz" (success),			\
 			       [ptr] "+m" (*__ptr),			\
 			       [old] "+a" (__old)			\
 			     : [new] "r" (__new)			\
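CC_SET()/CC_OUT() only existed as a fallback to an explicit set<cc> instruction on compilers without flag outputs. Since GCC 6 (and all supported Clang versions), the "=@cc<cond>" constraint lets the compiler consume EFLAGS directly, typically folding the result straight into a conditional branch instead of a setcc+test pair. A standalone sketch of the new form (GCC/Clang on x86 assumed):

	#include <stdbool.h>

	static inline bool test_bit_u32(int nr, const unsigned int *addr)
	{
		bool oldbit;

		/* The compiler materializes CF itself; no setc is written here. */
		asm volatile("btl %2,%1"
			     : "=@ccc" (oldbit)
			     : "m" (*addr), "Ir" (nr)
			     : "memory");
		return oldbit;
	}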
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 371f7906019e..1f80a62be969 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -46,8 +46,7 @@ static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new
 	bool ret;							\
 									\
 	asm_inline volatile(_lock "cmpxchg8b %[ptr]"			\
-		     CC_SET(e)						\
-		     : CC_OUT(e) (ret),					\
+		     : "=@ccz" (ret),					\
 		       [ptr] "+m" (*(_ptr)),				\
 		       "+a" (o.low), "+d" (o.high)			\
 		     : "b" (n.low), "c" (n.high)			\
@@ -125,8 +124,7 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
 	ALTERNATIVE(_lock_loc						\
 		    "call cmpxchg8b_emu",				\
 		    _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8)		\
-	CC_SET(e)							\
-	: ALT_OUTPUT_SP(CC_OUT(e) (ret),				\
+	: ALT_OUTPUT_SP("=@ccz" (ret),					\
 			"+a" (o.low), "+d" (o.high))			\
 	: "b" (n.low), "c" (n.high),					\
 	  [ptr] "S" (_ptr)						\
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 71d1e72ed879..5afea056fb20 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -66,8 +66,7 @@ static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old,
 	bool ret;							\
 									\
 	asm_inline volatile(_lock "cmpxchg16b %[ptr]"			\
-		     CC_SET(e)						\
-		     : CC_OUT(e) (ret),					\
+		     : "=@ccz" (ret),					\
 		       [ptr] "+m" (*(_ptr)),				\
 		       "+a" (o.low), "+d" (o.high)			\
 		     : "b" (n.low), "c" (n.high)			\
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index c69e269937c5..76b95bd1a405 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: LGPL-2.0+ */
 /*  Generic MTRR (Memory Type Range Register) ioctls.
 
     Copyright (C) 1997-1999  Richard Gooch
 
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
     Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
     The postal address is:
       Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
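All the cmpxchg variants above report success through "=@ccz": ZF is set exactly when the exchange happened. For illustration, the caller-side loop this enables, written against C11 atomics rather than the kernel macros (plain userspace C, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Add 'delta' unless the counter is zero (a refcount-style update). */
	static bool add_unless_zero(_Atomic unsigned int *ctr, unsigned int delta)
	{
		unsigned int old = atomic_load(ctr);

		do {
			if (old == 0)
				return false;
			/* On failure, 'old' is reloaded from memory for the retry. */
		} while (!atomic_compare_exchange_weak(ctr, &old, old + delta));

		return true;
	}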
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 6ca6516c7492..e4815e15dc9a 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -36,9 +36,7 @@ static __always_inline void __monitor(const void *eax, u32 ecx, u32 edx)
 
 static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
 {
-	/* "monitorx %eax, %ecx, %edx" */
-	asm volatile(".byte 0x0f, 0x01, 0xfa"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
+	asm volatile("monitorx" :: "a" (eax), "c" (ecx), "d"(edx));
 }
 
 static __always_inline void __mwait(u32 eax, u32 ecx)
@@ -80,9 +78,7 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
 {
 	/* No need for TSA buffer clearing on AMD */
 
-	/* "mwaitx %eax, %ebx, %ecx" */
-	asm volatile(".byte 0x0f, 0x01, 0xfb"
-		     :: "a" (eax), "b" (ebx), "c" (ecx));
+	asm volatile("mwaitx" :: "a" (eax), "b" (ebx), "c" (ecx));
 }
 
 /*
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b0d03b6c279b..332428caaed2 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -309,8 +309,7 @@ do {									\
 									\
 	asm qual (__pcpu_op_##size("cmpxchg") "%[nval], "		\
 		  __percpu_arg([var])					\
-		  CC_SET(z)						\
-		  : CC_OUT(z) (success),				\
+		  : "=@ccz" (success),					\
 		    [oval] "+a" (pco_old__),				\
 		    [var] "+m" (__my_cpu_var(_var))			\
 		  : [nval] __pcpu_reg_##size(, pco_new__)		\
@@ -367,8 +366,7 @@ do {									\
 	asm_inline qual (						\
 		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
 			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
-		CC_SET(z)						\
-		: ALT_OUTPUT_SP(CC_OUT(z) (success),			\
+		: ALT_OUTPUT_SP("=@ccz" (success),			\
 				[var] "+m" (__my_cpu_var(_var)),	\
 				"+a" (old__.low), "+d" (old__.high))	\
 		: "b" (new__.low), "c" (new__.high),			\
@@ -436,8 +434,7 @@ do {									\
 	asm_inline qual (						\
 		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
 			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
-		CC_SET(z)						\
-		: ALT_OUTPUT_SP(CC_OUT(z) (success),			\
+		: ALT_OUTPUT_SP("=@ccz" (success),			\
 				[var] "+m" (__my_cpu_var(_var)),	\
 				"+a" (old__.low), "+d" (old__.high))	\
 		: "b" (new__.low), "c" (new__.high),			\
@@ -585,8 +582,7 @@ do {									\
 	bool oldbit;							\
 									\
 	asm volatile("btl %[nr], " __percpu_arg([var])			\
-		     CC_SET(c)						\
-		     : CC_OUT(c) (oldbit)				\
+		     : "=@ccc" (oldbit)					\
 		     : [var] "m" (__my_cpu_var(_var)),			\
 		       [nr] "rI" (_nr));				\
 	oldbit;								\
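The monitorx/mwaitx change (and the rdpkru/wrpkru and encls hunks further down) are one and the same cleanup: the hand-encoded .byte sequences were only a workaround for assemblers that predated the mnemonic, and the raised binutils floor makes them unnecessary. A sketch of the before/after, with a hypothetical OLD_BINUTILS switch purely for illustration:

	static inline void monitorx_sketch(const void *addr, unsigned int ext,
					   unsigned int hints)
	{
	#ifdef OLD_BINUTILS	/* hypothetical switch, for illustration only */
		/* "monitorx %eax, %ecx, %edx" hand-encoded as raw opcodes */
		asm volatile(".byte 0x0f, 0x01, 0xfa"
			     :: "a" (addr), "c" (ext), "d" (hints));
	#else
		asm volatile("monitorx" :: "a" (addr), "c" (ext), "d" (hints));
	#endif
	}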
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 3821ee3fae35..54c8fc430684 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -6,37 +6,15 @@
 
 #define __CLOBBERS_MEM(clb...)	"memory", ## clb
 
-#ifndef __GCC_ASM_FLAG_OUTPUTS__
-
-/* Use asm goto */
-
-#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...)			\
-({									\
-	bool c = false;							\
-	asm goto (fullop "; j" #cc " %l[cc_label]"			\
-			: : [var] "m" (_var), ## __VA_ARGS__		\
-			: clobbers : cc_label);				\
-	if (0) {							\
-cc_label:	c = true;						\
-	}								\
-	c;								\
-})
-
-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
-
-/* Use flags output or a set instruction */
-
 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...)			\
 ({									\
 	bool c;								\
-	asm_inline volatile (fullop CC_SET(cc)				\
-			: [var] "+m" (_var), CC_OUT(cc) (c)		\
+	asm_inline volatile (fullop					\
+			: [var] "+m" (_var), "=@cc" #cc (c)		\
 			: __VA_ARGS__ : clobbers);			\
 	c;								\
 })
 
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
-
 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0)				\
 	__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index f9046c4b9a2b..0e6c0940100f 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -491,8 +491,7 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
 
 	/* "pvalidate" mnemonic support in binutils 2.36 and newer */
 	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
-		     CC_SET(c)
-		     : CC_OUT(c) (no_rmpupdate), "=a"(rc)
+		     : "=@ccc"(no_rmpupdate), "=a"(rc)
 		     : "a"(vaddr), "c"(rmp_psize), "d"(validate)
 		     : "memory", "cc");
 
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index c72d46175374..5c03aaa89014 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -83,8 +83,7 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
 	bool ret;
-	asm("btl %2,%1" CC_SET(c)
-	    : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
+	asm("btl %2,%1" : "=@ccc"(ret) : "m"(*set), "Ir"(_sig-1));
 	return ret;
 }
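With the asm-goto fallback deleted, __GEN_RMWcc() unconditionally uses a flag output. Roughly what a use like GEN_UNARY_RMWcc("decl", v, e) now boils down to after preprocessing (a simplified sketch, not the literal expansion):

	#include <stdbool.h>

	static inline bool dec_and_test(unsigned int *var)
	{
		bool zero;

		/* "decl" sets ZF when the result reaches zero; "=@ccz" reads it. */
		asm volatile("decl %[var]"
			     : [var] "+m" (*var), "=@ccz" (zero)
			     : : "memory");
		return zero;
	}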
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index fde2bd7af19e..46aa2c9c1bda 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -75,9 +75,7 @@ static inline u32 rdpkru(void)
 	 * "rdpkru" instruction.  Places PKRU contents in to EAX,
 	 * clears EDX and requires that ecx=0.
 	 */
-	asm volatile(".byte 0x0f,0x01,0xee\n\t"
-		     : "=a" (pkru), "=d" (edx)
-		     : "c" (ecx));
+	asm volatile("rdpkru" : "=a" (pkru), "=d" (edx) : "c" (ecx));
 	return pkru;
 }
 
@@ -89,8 +87,7 @@ static inline void wrpkru(u32 pkru)
 	 * "wrpkru" instruction.  Loads contents in EAX to PKRU,
 	 * requires that ecx = edx = 0.
 	 */
-	asm volatile(".byte 0x0f,0x01,0xef\n\t"
-		     : : "a" (pkru), "c"(ecx), "d"(edx));
+	asm volatile("wrpkru" : : "a" (pkru), "c"(ecx), "d"(edx));
 }
 
 #else
@@ -287,8 +284,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
 	 * See movdir64b()'s comment on operand specification.
 	 */
 	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
-		     CC_SET(z)
-		     : CC_OUT(z) (zf), "+m" (*__dst)
+		     : "=@ccz" (zf), "+m" (*__dst)
 		     : "m" (*__src), "a" (__dst), "d" (__src));
 
 	/* Submission failure is indicated via EFLAGS.ZF=1 */
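enqcmds reports a failed work submission via EFLAGS.ZF, which the "=@ccz" output above captures; the kernel helper turns that into an -EAGAIN return. A hedged sketch of a caller-side retry policy (hypothetical wrapper, not kernel code):

	#include <errno.h>

	/* Assumed wrapper with enqcmds()-like semantics: 0 on success,
	 * -EAGAIN when the device signalled failure through ZF. */
	extern int enqcmds_submit(void *portal, const void *desc);

	static int submit_with_retry(void *portal, const void *desc, int tries)
	{
		while (tries--) {
			if (enqcmds_submit(portal, desc) == 0)
				return 0;
		}
		return -EAGAIN;
	}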
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 3a7755c1a441..91a3fb8ae7ff 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -378,7 +378,7 @@ do {									\
 	asm_goto_output("\n"						\
 		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
 		     _ASM_EXTABLE_UA(1b, %l[label])			\
-		     : CC_OUT(z) (success),				\
+		     : "=@ccz" (success),				\
 		       [ptr] "+m" (*_ptr),				\
 		       [old] "+a" (__old)				\
 		     : [new] ltype (__new)				\
@@ -397,7 +397,7 @@ do {									\
 	asm_goto_output("\n"						\
 		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
 		     _ASM_EXTABLE_UA(1b, %l[label])			\
-		     : CC_OUT(z) (success),				\
+		     : "=@ccz" (success),				\
 		       "+A" (__old),					\
 		       [ptr] "+m" (*_ptr)				\
 		     : "b" ((u32)__new),				\
@@ -417,11 +417,10 @@ do {									\
 	__typeof__(*(_ptr)) __new = (_new);				\
 	asm volatile("\n"						\
 		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-		     CC_SET(z)						\
 		     "2:\n"						\
 		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
 					   %[errout])			\
-		     : CC_OUT(z) (success),				\
+		     : "=@ccz" (success),				\
 		       [errout] "+r" (__err),				\
 		       [ptr] "+m" (*_ptr),				\
 		       [old] "+a" (__old)				\
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 18cf79d6e2c5..763534d77f59 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.0+
 /*
  * MTRR (Memory Type Range Register) cleanup
  *
  * Copyright (C) 2009 Yinghai Lu
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/init.h>
 #include <linux/pci.h>
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index ecbda0341a8a..4b3d492afe17 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: LGPL-2.0+
 /*  Generic MTRR (Memory Type Range Register) driver.
 
     Copyright (C) 1997-2000  Richard Gooch
     Copyright (c) 2002	     Patrick Mochel
 
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
     Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
     The postal address is:
       Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h
index 99004b02e2ed..42a088a337c5 100644
--- a/arch/x86/kernel/cpu/sgx/encls.h
+++ b/arch/x86/kernel/cpu/sgx/encls.h
@@ -68,7 +68,7 @@ static inline bool encls_failed(int ret)
 	({						\
 	int ret;					\
 	asm volatile(					\
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t"		\
+	"1: encls\n"					\
 	"2:\n"						\
 	_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX)	\
 	: "=a"(ret)					\
@@ -111,8 +111,8 @@ static inline bool encls_failed(int ret)
 	({						\
 	int ret;					\
 	asm volatile(					\
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t"		\
-	"   xor %%eax,%%eax;\n"				\
+	"1: encls\n\t"					\
+	"xor %%eax,%%eax\n"				\
 	"2:\n"						\
 	_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX)	\
 	: "=a"(ret), "=b"(rbx_out)			\
diff --git a/tools/arch/x86/include/asm/asm.h b/tools/arch/x86/include/asm/asm.h
index dbe39b44256b..6e1b357c374b 100644
--- a/tools/arch/x86/include/asm/asm.h
+++ b/tools/arch/x86/include/asm/asm.h
@@ -108,18 +108,6 @@
 
 #endif
 
-/*
- * Macros to generate condition code outputs from inline assembly,
- * The output operand must be type "bool".
- */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
-# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
-# define CC_OUT(c) "=@cc" #c
-#else
-# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
-# define CC_OUT(c) [_cc_ ## c] "=qm"
-#endif
-
 #ifdef __KERNEL__
 
 /* Exception table entry */
diff --git a/tools/perf/bench/find-bit-bench.c b/tools/perf/bench/find-bit-bench.c
index 7e25b0e413f6..e697c20951bc 100644
--- a/tools/perf/bench/find-bit-bench.c
+++ b/tools/perf/bench/find-bit-bench.c
@@ -37,7 +37,7 @@ static noinline void workload(int val)
 	accumulator++;
 }
 
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__)
+#if defined(__i386__) || defined(__x86_64__)
 static bool asm_test_bit(long nr, const unsigned long *addr)
 {
 	bool oldbit;