author      Linus Torvalds <torvalds@linux-foundation.org>    2025-07-29 16:55:29 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2025-07-29 16:55:29 -0700
commit      bb78c145f7f08fda40bcec397939b01be4366c51 (patch)
tree        b96d764a6e13b75e58b1ed01ca66ef180f8c5353
parent      04d29e3609b62896b94b60250d475f8f7c15db98 (diff)
parent      4fdc3431e03b9c11803f399f91837fca487029a1 (diff)
Merge tag 'x86_core_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu updates from Borislav Petkov:
- Add helpers for WB{NO,}INVD so that KVM can use them and thus reduce
  the number of cache invalidations needed, along with the usual
  preceding cleanups (see the usage sketch after the shortlog below)
* tag 'x86_core_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/lib: Add WBINVD and WBNOINVD helpers to target multiple CPUs
x86/lib: Add WBNOINVD helper functions
x86/lib: Drop the unused return value from wbinvd_on_all_cpus()
drm/gpu: Remove dead checks on wbinvd_on_all_cpus()'s return value
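For context, a minimal sketch of how a caller migrates from the old open-coded IPI pattern to the new mask-based helper. This is modeled on the KVM hunk in the diff below; flush_dirty_cpus() and its mask parameter are hypothetical names for illustration, not code from this series:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/smp.h>

/*
 * Hypothetical caller, for illustration only: flush caches on every CPU
 * that may hold dirty lines, then reset the tracking mask. Before this
 * series, this required on_each_cpu_mask() plus a local WBINVD IPI stub.
 */
static void flush_dirty_cpus(struct cpumask *mask)
{
        int cpu = get_cpu();            /* pin this CPU, disable preemption */

        cpumask_set_cpu(cpu, mask);     /* make sure we flush ourselves too */
        wbinvd_on_cpus_mask(mask);      /* IPI + WBINVD on each CPU in mask */
        put_cpu();
        cpumask_clear(mask);
}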
-rw-r--r--   arch/x86/include/asm/smp.h            | 23
-rw-r--r--   arch/x86/include/asm/special_insns.h  | 29
-rw-r--r--   arch/x86/kvm/x86.c                    |  3
-rw-r--r--   arch/x86/lib/cache-smp.c              | 26
-rw-r--r--   drivers/gpu/drm/drm_cache.c           |  9
5 files changed, 76 insertions, 14 deletions
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0c1c68039d6f..22bfebe6776d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,7 +112,10 @@ void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
-int wbinvd_on_all_cpus(void);
+void wbinvd_on_all_cpus(void);
+void wbinvd_on_cpus_mask(struct cpumask *cpus);
+void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
 
 void smp_kick_mwait_play_dead(void);
 void __noreturn mwait_play_dead(unsigned int eax_hint);
@@ -148,10 +151,24 @@ static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
 
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)      wbinvd()
-static inline int wbinvd_on_all_cpus(void)
+static inline void wbinvd_on_all_cpus(void)
 {
        wbinvd();
-       return 0;
+}
+
+static inline void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+       wbinvd();
+}
+
+static inline void wbnoinvd_on_all_cpus(void)
+{
+       wbnoinvd();
+}
+
+static inline void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+       wbnoinvd();
 }
 
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index ecda17efa042..fde2bd7af19e 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -104,9 +104,36 @@ static inline void wrpkru(u32 pkru)
 }
 #endif
 
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, and then invalidate all caches. Depending
+ * on the micro-architecture, WBINVD (and WBNOINVD below) may or may not affect
+ * lower level caches associated with another logical processor that shares any
+ * level of this processor's cache hierarchy.
+ */
 static __always_inline void wbinvd(void)
 {
-       asm volatile("wbinvd": : :"memory");
+       asm volatile("wbinvd" : : : "memory");
+}
+
+/* Instruction encoding provided for binutils backwards compatibility. */
+#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
+
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, but do NOT explicitly invalidate caches,
+ * i.e. leave all/most cache lines in the hierarchy in non-modified state.
+ */
+static __always_inline void wbnoinvd(void)
+{
+       /*
+        * Explicitly encode WBINVD if X86_FEATURE_WBNOINVD is unavailable even
+        * though WBNOINVD is backwards compatible (it's simply WBINVD with an
+        * ignored REP prefix), to guarantee that WBNOINVD isn't used if it
+        * needs to be avoided for any reason. For all supported usage in the
+        * kernel, WBINVD is functionally a superset of WBNOINVD.
+        */
+       alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
 }
 
 static inline unsigned long __read_cr4(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 93636f77c42d..f2a57a1136d2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8295,8 +8295,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
                int cpu = get_cpu();
 
                cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
-               on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
-                                wbinvd_ipi, NULL, 1);
+               wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask);
                put_cpu();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
        } else
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 7af743bd3b13..c5c60d07308c 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -14,9 +14,31 @@ void wbinvd_on_cpu(int cpu)
 }
 EXPORT_SYMBOL(wbinvd_on_cpu);
 
-int wbinvd_on_all_cpus(void)
+void wbinvd_on_all_cpus(void)
 {
        on_each_cpu(__wbinvd, NULL, 1);
-       return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+       on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbinvd_on_cpus_mask);
+
+static void __wbnoinvd(void *dummy)
+{
+       wbnoinvd();
+}
+
+void wbnoinvd_on_all_cpus(void)
+{
+       on_each_cpu(__wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_all_cpus);
+
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+       on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_cpus_mask);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 7051c9c909c2..ea1d2d5d2c66 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -93,8 +93,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
                return;
        }
 
-       if (wbinvd_on_all_cpus())
-               pr_err("Timed out waiting for cache flush\n");
+       wbinvd_on_all_cpus();
 
 #elif defined(__powerpc__)
        unsigned long i;
@@ -139,8 +138,7 @@ drm_clflush_sg(struct sg_table *st)
                return;
        }
 
-       if (wbinvd_on_all_cpus())
-               pr_err("Timed out waiting for cache flush\n");
+       wbinvd_on_all_cpus();
 #else
        WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
@@ -172,8 +170,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
                return;
        }
 
-       if (wbinvd_on_all_cpus())
-               pr_err("Timed out waiting for cache flush\n");
+       wbinvd_on_all_cpus();
 #else
        WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
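An aside on the wbnoinvd() hunk above: WBNOINVD is simply WBINVD (0x0f 0x09) with a REP prefix (0xf3), which is why the WBINVD fallback is always safe. The following is a conceptual C rendering of what the alternative() selects, for illustration only; the real kernel patches the instruction bytes once at boot rather than testing the feature bit on every call, and wbnoinvd_sketch is a hypothetical name:

#include <asm/cpufeature.h>

/*
 * Illustrative sketch only: conceptually what the alternative() in
 * wbnoinvd() chooses between. The kernel does this selection once,
 * at boot, by rewriting the instruction in place.
 */
static inline void wbnoinvd_sketch(void)
{
        if (boot_cpu_has(X86_FEATURE_WBNOINVD))
                /* REP WBINVD (0xf3 0x0f 0x09): write back, don't invalidate */
                asm volatile(".byte 0xf3, 0x0f, 0x09" ::: "memory");
        else
                /* Plain WBINVD (0x0f 0x09): write back and invalidate */
                asm volatile("wbinvd" ::: "memory");
}

On CPUs without the feature the REP prefix would be silently ignored anyway, but the explicit fallback guarantees WBINVD semantics wherever WBNOINVD must be avoided, as the comment in the patch notes.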