author		Linus Torvalds <torvalds@linux-foundation.org>	2020-04-12 10:17:16 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-04-12 10:17:16 -0700
commit		4f8a3cc1183c442daee6cc65360e3385021131e4 (patch)
tree		030efd9b19604201a4a42e10dd44357e5c2de1bd /arch/x86/include
parent		0785249f8b93836986e9d1bdeefd2a2c13f160af (diff)
parent		e6f8b6c12f03818baacc5f504fe83fa5e20771d6 (diff)
Merge tag 'x86-urgent-2020-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "A set of three patches to fix the fallout of the newly added split
  lock detection feature.

  It addresses the case where a KVM guest triggers a split lock #AC and
  KVM reinjects it into the guest, which is not prepared to handle it.

  Add proper sanity checks which prevent the unconditional injection
  into the guest and handle the #AC on the host side in the same way as
  user space detections are handled. Depending on the detection mode it
  either warns and disables detection for the task, or kills the task if
  the mode is set to fatal."

* tag 'x86-urgent-2020-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  KVM: VMX: Extend VMX's #AC interceptor to handle split lock #AC in guest
  KVM: x86: Emulate split-lock access as a write in emulator
  x86/split_lock: Provide handle_guest_split_lock()
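The new handle_guest_split_lock() hook is the host-side piece of that flow. A minimal sketch of the behaviour the merge message describes (warn and disable detection for the current task, or force a SIGBUS when the mode is fatal) could look roughly like the following; sld_state, sld_warn and split_lock_warn() are illustrative stand-ins for the detection-mode state and the existing warn path, not the exact code merged here:

/*
 * Rough sketch of the host-side handling described above; not the
 * actual implementation from this merge. sld_state/split_lock_warn()
 * are assumed names for the detection-mode state and warn helper.
 */
bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		/* Warn, disable detection for this task, let the guest resume. */
		split_lock_warn(ip);
		return true;
	}

	/* Fatal mode: record the trap and kill the offending task with SIGBUS. */
	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}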
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/cpu.h | 6 ++++++
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index ff6f3ca649b3..dd17c2da1af5 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -44,6 +44,7 @@ unsigned int x86_stepping(unsigned int sig);
extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
extern void switch_to_sld(unsigned long tifn);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
+extern bool handle_guest_split_lock(unsigned long ip);
#else
static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
static inline void switch_to_sld(unsigned long tifn) {}
@@ -51,5 +52,10 @@ static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
return false;
}
+
+static inline bool handle_guest_split_lock(unsigned long ip)
+{
+ return false;
+}
#endif
#endif /* _ASM_X86_CPU_H */
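
For reference, the first KVM commit listed above wires this declaration into the VMX #AC intercept. A simplified, illustrative sketch of how it is meant to be used follows; this is not the exact hunk from that patch, and guest_wants_alignment_check() is a hypothetical helper standing in for the added sanity checks:

/*
 * Illustrative caller of handle_guest_split_lock() from a VMX #AC exit
 * path (simplified; not the exact KVM change in this merge).
 */
static int vmx_handle_ac(struct kvm_vcpu *vcpu, u32 error_code)
{
	/* Reinject only when the guest actually asked for alignment checking. */
	if (guest_wants_alignment_check(vcpu)) {
		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
		return 1;
	}

	/* Split lock #AC: handle it on the host; resume the guest if handled. */
	if (handle_guest_split_lock(kvm_rip_read(vcpu)))
		return 1;

	/* Fatal mode: the offending task has already been signalled. */
	return 0;
}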