From 955997e880175b00161efb83b1b282849ff412d3 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 30 Oct 2023 16:17:28 +0200 Subject: KVM: x86: Use mutex guards to eliminate __kvm_x86_vendor_init() Use the recently introduced guard(mutex) infrastructure acquire and automatically release vendor_module_lock when the guard goes out of scope. Drop the inner __kvm_x86_vendor_init(), its sole purpose was to simplify releasing vendor_module_lock in error paths. No functional change intended. Signed-off-by: Nikolay Borisov Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20231030141728.1406118-1-nik.borisov@suse.com [sean: rewrite changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 363b1c080205..709844fcd521 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9632,11 +9632,13 @@ static void kvm_x86_check_cpu_compat(void *ret) *(int *)ret = kvm_x86_check_processor_compatibility(); } -static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) +int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) { u64 host_pat; int r, cpu; + guard(mutex)(&vendor_module_lock); + if (kvm_x86_ops.hardware_enable) { pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name); return -EEXIST; @@ -9766,17 +9768,6 @@ out_free_x86_emulator_cache: kmem_cache_destroy(x86_emulator_cache); return r; } - -int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) -{ - int r; - - mutex_lock(&vendor_module_lock); - r = __kvm_x86_vendor_init(ops); - mutex_unlock(&vendor_module_lock); - - return r; -} EXPORT_SYMBOL_GPL(kvm_x86_vendor_init); void kvm_x86_vendor_exit(void) -- cgit From e1dda3afe2a9f466940d44db8baaaf6c0ff8793f Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 3 Feb 2024 13:45:22 +0100 Subject: KVM: x86: Fix broken debugregs ABI for 32 bit kernels The ioctl()s to get and set KVM's debug registers are broken for 32 bit kernels as they'd only copy half of the user register state because of a UAPI and in-kernel type mismatch (__u64 vs. unsigned long; 8 vs. 4 bytes). This makes it impossible for userland to set anything but DR0 without resorting to bit folding tricks. Switch to a loop for copying debug registers that'll implicitly do the type conversion for us, if needed. There are likely no users (left) for 32bit KVM, fix the bug nonetheless. 
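[Editor's sketch] To make the size mismatch concrete, here is a minimal standalone sketch (simplified types, not the in-tree KVM code) of why a memcpy() sized by the kernel-side array moves only half of the UAPI data on a 32-bit build, while a per-element loop lets the compiler perform the 64-bit to 32-bit integer conversion per register:

#include <stdint.h>
#include <string.h>

struct uapi_dbgregs {
	uint64_t db[4];		/* UAPI view: always 8 bytes per register */
};

/*
 * Broken variant: on a 32-bit build, 4 * sizeof(unsigned long) == 16
 * bytes, so only the first two UAPI entries are consumed and their
 * low/high halves land in the wrong kernel-side slots.
 */
static void set_db_memcpy(unsigned long kdb[4], const struct uapi_dbgregs *u)
{
	memcpy(kdb, u->db, 4 * sizeof(unsigned long));
}

/*
 * Fixed variant: element-wise copy, with an implicit uint64_t ->
 * unsigned long conversion for each register.
 */
static void set_db_loop(unsigned long kdb[4], const struct uapi_dbgregs *u)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		kdb[i] = u->db[i];
}
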
Fixes: a1efbe77c1fd ("KVM: x86: Add support for saving&restoring debug registers") Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20240203124522.592778-4-minipli@grsecurity.net Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 709844fcd521..7eb7736cfd76 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5510,9 +5510,14 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { unsigned long val; + unsigned int i; memset(dbgregs, 0, sizeof(*dbgregs)); - memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); + + BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db)); + for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) + dbgregs->db[i] = vcpu->arch.db[i]; + kvm_get_dr(vcpu, 6, &val); dbgregs->dr6 = val; dbgregs->dr7 = vcpu->arch.dr7; @@ -5521,6 +5526,8 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { + unsigned int i; + if (dbgregs->flags) return -EINVAL; @@ -5529,7 +5536,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, if (!kvm_dr7_valid(dbgregs->dr7)) return -EINVAL; - memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); + for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) + vcpu->arch.db[i] = dbgregs->db[i]; + kvm_update_dr0123(vcpu); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; -- cgit From 03f6298c7cf6d2c1ccd0961ab9b340502d63840a Mon Sep 17 00:00:00 2001 From: Thomas Prescher Date: Tue, 12 Dec 2023 10:59:37 +0100 Subject: KVM: x86/emulator: emulate movbe with operand-size prefix The MOVBE instruction can come with an operand-size prefix (66h). In this, case the x86 emulation code returns EMULATION_FAILED. It turns out that em_movbe can already handle this case and all that is missing is an entry in respective opcode tables to populate gprefix->pfx_66. Signed-off-by: Thomas Prescher Signed-off-by: Julian Stecklina Acked-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231212095938.26731-1-julian.stecklina@cyberus-technology.de Signed-off-by: Sean Christopherson --- arch/x86/kvm/emulate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e223043ef5b2..c75924f4f737 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4505,11 +4505,11 @@ static const struct instr_dual instr_dual_0f_38_f1 = { }; static const struct gprefix three_byte_0f_38_f0 = { - ID(0, &instr_dual_0f_38_f0), N, N, N + ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N }; static const struct gprefix three_byte_0f_38_f1 = { - ID(0, &instr_dual_0f_38_f1), N, N, N + ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N }; /* -- cgit From 6fd1e3963f20e850d7b3c89485e58d1ae79c309a Mon Sep 17 00:00:00 2001 From: Julian Stecklina Date: Mon, 9 Oct 2023 11:20:53 +0200 Subject: KVM: x86: Clean up partially uninitialized integer in emulate_pop() Explicitly zero out variables passed to emulate_pop() as output params to harden against consuming uninitialized data, and to make sanitizers happy. Many flows that use emulate_pop() pass an "unsigned long" so as to be able to hold the largest possible operand, but the actual number of bytes written is usually the word with of the vCPU. E.g. 
if the vCPU is in 16-bit or 32-bit mode (on a 64-bit host), the upper portion of the output param will be uninitialized. Passing around the uninitialized data is benign, as actual KVM usage of the output is also tied to the word width, but passing around uninitialized data makes some sanitizers rightly complain. Note, initializing the data in emulate_pop() is not a safe alternative, e.g. it would result in em_leave() clobbering RBP[31:16] if LEAVE were emulated with a 16-bit stack. Signed-off-by: Julian Stecklina Link: https://lore.kernel.org/r/20231009092054.556935-1-julian.stecklina@cyberus-technology.de [sean: massage changelog, drop em_popa() variable size change]] Signed-off-by: Sean Christopherson --- arch/x86/kvm/emulate.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c75924f4f737..711b3250ebaa 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1863,7 +1863,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; - unsigned long val, change_mask; + unsigned long val = 0; + unsigned long change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; int cpl = ctxt->ops->cpl(ctxt); @@ -1954,7 +1955,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt) static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; - unsigned long selector; + unsigned long selector = 0; int rc; rc = emulate_pop(ctxt, &selector, 2); @@ -2000,7 +2001,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; - u32 val; + u32 val = 0; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { @@ -2229,7 +2230,7 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) static int em_ret(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned long eip; + unsigned long eip = 0; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) @@ -2241,7 +2242,8 @@ static int em_ret(struct x86_emulate_ctxt *ctxt) static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned long eip, cs; + unsigned long eip = 0; + unsigned long cs = 0; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct new_desc; @@ -3184,7 +3186,7 @@ fail: static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned long eip; + unsigned long eip = 0; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) -- cgit From 64435aaa4a6aca019fabca7df45e0782d98a477b Mon Sep 17 00:00:00 2001 From: Julian Stecklina Date: Mon, 9 Oct 2023 11:20:54 +0200 Subject: KVM: x86: rename push to emulate_push for consistency push and emulate_pop are counterparts. Rename push to emulate_push and harmonize its function signature with emulate_pop. This should remove a bit of cognitive load when reading this code. 
Signed-off-by: Julian Stecklina Link: https://lore.kernel.org/r/20231009092054.556935-2-julian.stecklina@cyberus-technology.de Signed-off-by: Sean Christopherson --- arch/x86/kvm/emulate.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 711b3250ebaa..eafb48f0b745 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1820,22 +1820,22 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) return X86EMUL_CONTINUE; } -static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) +static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len) { struct segmented_address addr; - rsp_increment(ctxt, -bytes); + rsp_increment(ctxt, -len); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; - return segmented_write(ctxt, addr, data, bytes); + return segmented_write(ctxt, addr, data, len); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; - return push(ctxt, &ctxt->src.val, ctxt->op_bytes); + return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, @@ -1921,7 +1921,7 @@ static int em_enter(struct x86_emulate_ctxt *ctxt) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); - rc = push(ctxt, &rbp, stack_size(ctxt)); + rc = emulate_push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), -- cgit From fc5375dd8c062d81001b50008d65ca09e942f924 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 9 Feb 2024 14:07:51 -0800 Subject: KVM: x86: Make kvm_get_dr() return a value, not use an out parameter Convert kvm_get_dr()'s output parameter to a return value, and clean up most of the mess that was created by forcing callers to provide a pointer. No functional change intended. 
Acked-by: Mathias Krause Reviewed-by: Mathias Krause Link: https://lore.kernel.org/r/20240209220752.388160-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/emulate.c | 17 ++++------------- arch/x86/kvm/kvm_emulate.h | 2 +- arch/x86/kvm/smm.c | 15 ++++----------- arch/x86/kvm/svm/svm.c | 7 ++----- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 5 +---- arch/x86/kvm/x86.c | 20 +++++++------------- 8 files changed, 21 insertions(+), 49 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b5b2d0fde579..7a59043a6ab0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2045,7 +2045,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); -void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); +unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index eafb48f0b745..3328c7c7f2f9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3013,7 +3013,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, ret = em_push(ctxt); } - ops->get_dr(ctxt, 7, &dr7); + dr7 = ops->get_dr(ctxt, 7); ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); return ret; @@ -3868,15 +3868,6 @@ static int check_cr_access(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) -{ - unsigned long dr7; - - ctxt->ops->get_dr(ctxt, 7, &dr7); - - return dr7 & DR7_GD; -} - static int check_dr_read(struct x86_emulate_ctxt *ctxt) { int dr = ctxt->modrm_reg; @@ -3889,10 +3880,10 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt) if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) return emulate_ud(ctxt); - if (check_dr7_gd(ctxt)) { + if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) { ulong dr6; - ctxt->ops->get_dr(ctxt, 6, &dr6); + dr6 = ctxt->ops->get_dr(ctxt, 6); dr6 &= ~DR_TRAP_BITS; dr6 |= DR6_BD | DR6_ACTIVE_LOW; ctxt->ops->set_dr(ctxt, 6, dr6); @@ -5451,7 +5442,7 @@ twobyte_insn: ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ - ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); + ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg); break; case 0x40 ... 
0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index e6d149825169..153977c556f4 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -203,7 +203,7 @@ struct x86_emulate_ops { ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr); int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); int (*cpl)(struct x86_emulate_ctxt *ctxt); - void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); + ulong (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c index dc3d95fdca7d..19a7a0a31953 100644 --- a/arch/x86/kvm/smm.c +++ b/arch/x86/kvm/smm.c @@ -184,7 +184,6 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_state_32 *smram) { struct desc_ptr dt; - unsigned long val; int i; smram->cr0 = kvm_read_cr0(vcpu); @@ -195,10 +194,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, for (i = 0; i < 8; i++) smram->gprs[i] = kvm_register_read_raw(vcpu, i); - kvm_get_dr(vcpu, 6, &val); - smram->dr6 = (u32)val; - kvm_get_dr(vcpu, 7, &val); - smram->dr7 = (u32)val; + smram->dr6 = (u32)kvm_get_dr(vcpu, 6); + smram->dr7 = (u32)kvm_get_dr(vcpu, 7); enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR); enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR); @@ -231,7 +228,6 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_state_64 *smram) { struct desc_ptr dt; - unsigned long val; int i; for (i = 0; i < 16; i++) @@ -240,11 +236,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, smram->rip = kvm_rip_read(vcpu); smram->rflags = kvm_get_rflags(vcpu); - - kvm_get_dr(vcpu, 6, &val); - smram->dr6 = val; - kvm_get_dr(vcpu, 7, &val); - smram->dr7 = val; + smram->dr6 = kvm_get_dr(vcpu, 6); + smram->dr7 = kvm_get_dr(vcpu, 7); smram->cr0 = kvm_read_cr0(vcpu); smram->cr3 = kvm_read_cr3(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e90b429c84f1..dda91f7cd71b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2735,7 +2735,6 @@ static int dr_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); int reg, dr; - unsigned long val; int err = 0; /* @@ -2763,11 +2762,9 @@ static int dr_interception(struct kvm_vcpu *vcpu) dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; if (dr >= 16) { /* mov to DRn */ dr -= 16; - val = kvm_register_read(vcpu, reg); - err = kvm_set_dr(vcpu, dr, val); + err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); } else { - kvm_get_dr(vcpu, dr, &val); - kvm_register_write(vcpu, reg, val); + kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr)); } return kvm_complete_insn_gp(vcpu, err); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 6329a306856b..692fd559f086 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4433,7 +4433,7 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) - kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); + vmcs12->guest_dr7 = kvm_get_dr(vcpu, 7); if (vmcs12->vm_exit_controls & 
VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e262bc2ba4e5..aa47433d0c9b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5566,10 +5566,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { - unsigned long val; - - kvm_get_dr(vcpu, dr, &val); - kvm_register_write(vcpu, reg, val); + kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr)); err = 0; } else { err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7eb7736cfd76..b195eb21639c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1399,22 +1399,19 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) } EXPORT_SYMBOL_GPL(kvm_set_dr); -void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) +unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr) { size_t size = ARRAY_SIZE(vcpu->arch.db); switch (dr) { case 0 ... 3: - *val = vcpu->arch.db[array_index_nospec(dr, size)]; - break; + return vcpu->arch.db[array_index_nospec(dr, size)]; case 4: case 6: - *val = vcpu->arch.dr6; - break; + return vcpu->arch.dr6; case 5: default: /* 7 */ - *val = vcpu->arch.dr7; - break; + return vcpu->arch.dr7; } } EXPORT_SYMBOL_GPL(kvm_get_dr); @@ -5509,7 +5506,6 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { - unsigned long val; unsigned int i; memset(dbgregs, 0, sizeof(*dbgregs)); @@ -5518,8 +5514,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) dbgregs->db[i] = vcpu->arch.db[i]; - kvm_get_dr(vcpu, 6, &val); - dbgregs->dr6 = val; + dbgregs->dr6 = kvm_get_dr(vcpu, 6); dbgregs->dr7 = vcpu->arch.dr7; } @@ -8173,10 +8168,9 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); } -static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, - unsigned long *dest) +static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr) { - kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); + return kvm_get_dr(emul_to_vcpu(ctxt), dr); } static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, -- cgit From 2a5f091ce1c9222fc9f98374d92db9539e6004ae Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 9 Feb 2024 14:07:52 -0800 Subject: KVM: x86: Open code all direct reads to guest DR6 and DR7 Bite the bullet, and open code all direct reads of DR6 and DR7. KVM currently has a mix of open coded accesses and calls to kvm_get_dr(), which is confusing and ugly because there's no rhyme or reason as to why any particular chunk of code uses kvm_get_dr(). The obvious alternative is to force all accesses through kvm_get_dr(), but it's not at all clear that doing so would be a net positive, e.g. even if KVM ends up wanting/needing to force all reads through a common helper, e.g. to play caching games, the cost of reverting this change is likely lower than the ongoing cost of maintaining weird, arbitrary code. No functional change intended. 
Cc: Mathias Krause Reviewed-by: Mathias Krause Link: https://lore.kernel.org/r/20240209220752.388160-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/smm.c | 8 ++++---- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/x86.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c index 19a7a0a31953..d06d43d8d2aa 100644 --- a/arch/x86/kvm/smm.c +++ b/arch/x86/kvm/smm.c @@ -194,8 +194,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, for (i = 0; i < 8; i++) smram->gprs[i] = kvm_register_read_raw(vcpu, i); - smram->dr6 = (u32)kvm_get_dr(vcpu, 6); - smram->dr7 = (u32)kvm_get_dr(vcpu, 7); + smram->dr6 = (u32)vcpu->arch.dr6; + smram->dr7 = (u32)vcpu->arch.dr7; enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR); enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR); @@ -236,8 +236,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, smram->rip = kvm_rip_read(vcpu); smram->rflags = kvm_get_rflags(vcpu); - smram->dr6 = kvm_get_dr(vcpu, 6); - smram->dr7 = kvm_get_dr(vcpu, 7); + smram->dr6 = vcpu->arch.dr6; + smram->dr7 = vcpu->arch.dr7; smram->cr0 = kvm_read_cr0(vcpu); smram->cr3 = kvm_read_cr3(vcpu); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 692fd559f086..0cd977f22c4b 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4433,7 +4433,7 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) - vmcs12->guest_dr7 = kvm_get_dr(vcpu, 7); + vmcs12->guest_dr7 = vcpu->arch.dr7; if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b195eb21639c..f97e443ce853 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5514,7 +5514,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) dbgregs->db[i] = vcpu->arch.db[i]; - dbgregs->dr6 = kvm_get_dr(vcpu, 6); + dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; } -- cgit From 9c9025ea003a03f967affd690f39b4ef3452c0f5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 17:27:00 -0800 Subject: KVM: x86: Plumb "force_immediate_exit" into kvm_entry() tracepoint Annotate the kvm_entry() tracepoint with "immediate exit" when KVM is forcing a VM-Exit immediately after VM-Enter, e.g. when KVM wants to inject an event but needs to first complete some other operation. Knowing that KVM is (or isn't) forcing an exit is useful information when debugging issues related to event injection. 
Suggested-by: Maxim Levitsky Link: https://lore.kernel.org/r/20240110012705.506918-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/svm/svm.c | 5 +++-- arch/x86/kvm/trace.h | 9 ++++++--- arch/x86/kvm/vmx/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 2 +- 5 files changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7a59043a6ab0..bdda3edddec9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1663,7 +1663,8 @@ struct kvm_x86_ops { void (*flush_tlb_guest)(struct kvm_vcpu *vcpu); int (*vcpu_pre_run)(struct kvm_vcpu *vcpu); - enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu); + enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu, + bool force_immediate_exit); int (*handle_exit)(struct kvm_vcpu *vcpu, enum exit_fastpath_completion exit_fastpath); int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index dda91f7cd71b..ab8ad8d0e818 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4112,12 +4112,13 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in guest_state_exit_irqoff(); } -static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) +static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, + bool force_immediate_exit) { struct vcpu_svm *svm = to_svm(vcpu); bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL); - trace_kvm_entry(vcpu); + trace_kvm_entry(vcpu, force_immediate_exit); svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 83843379813e..88659de4d2a7 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -15,20 +15,23 @@ * Tracepoint for guest mode entry. */ TRACE_EVENT(kvm_entry, - TP_PROTO(struct kvm_vcpu *vcpu), - TP_ARGS(vcpu), + TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit), + TP_ARGS(vcpu, force_immediate_exit), TP_STRUCT__entry( __field( unsigned int, vcpu_id ) __field( unsigned long, rip ) + __field( bool, immediate_exit ) ), TP_fast_assign( __entry->vcpu_id = vcpu->vcpu_id; __entry->rip = kvm_rip_read(vcpu); + __entry->immediate_exit = force_immediate_exit; ), - TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) + TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip, + __entry->immediate_exit ? 
"[immediate exit]" : "") ); /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index aa47433d0c9b..e3653d11f001 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7265,7 +7265,7 @@ out: guest_state_exit_irqoff(); } -static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) +static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long cr3, cr4; @@ -7292,7 +7292,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) return EXIT_FASTPATH_NONE; } - trace_kvm_entry(vcpu); + trace_kvm_entry(vcpu, force_immediate_exit); if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f97e443ce853..e92f14b9d0d6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10956,7 +10956,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); - exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); + exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit); if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) break; -- cgit From e6b5d16bbd2d4c8259ad76aa33de80d561aba5f9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 17:27:01 -0800 Subject: KVM: VMX: Re-enter guest in fastpath for "spurious" preemption timer exits Re-enter the guest in the fast path if VMX preeemption timer VM-Exit was "spurious", i.e. if KVM "soft disabled" the timer by writing -1u and by some miracle the timer expired before any other VM-Exit occurred. This is just an intermediate step to cleaning up the preemption timer handling, optimizing these types of spurious VM-Exits is not interesting as they are extremely rare/infrequent. Link: https://lore.kernel.org/r/20240110012705.506918-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e3653d11f001..a44b09e35fd6 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5992,8 +5992,15 @@ static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - if (!vmx->req_immediate_exit && - !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { + /* + * In the *extremely* unlikely scenario that this is a spurious VM-Exit + * due to the timer expiring while it was "soft" disabled, just eat the + * exit and re-enter the guest. + */ + if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) + return EXIT_FASTPATH_REENTER_GUEST; + + if (!vmx->req_immediate_exit) { kvm_lapic_expired_hv_timer(vcpu); return EXIT_FASTPATH_REENTER_GUEST; } -- cgit From 11776aa0cfa7d007ad1799b1553bdcbd830e5010 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 17:27:02 -0800 Subject: KVM: VMX: Handle forced exit due to preemption timer in fastpath Handle VMX preemption timer VM-Exits due to KVM forcing an exit in the exit fastpath, i.e. avoid calling back into handle_preemption_timer() for the same exit. There is no work to be done for forced exits, as the name suggests the goal is purely to get control back in KVM. In addition to shaving a few cycles, this will allow cleanly separating handle_fastpath_preemption_timer() from handle_preemption_timer(), e.g. 
it's not immediately obvious why _apparently_ calling handle_fastpath_preemption_timer() twice on a "slow" exit is necessary: the "slow" call is necessary to handle exits from L2, which are excluded from the fastpath by vmx_vcpu_run(). Link: https://lore.kernel.org/r/20240110012705.506918-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a44b09e35fd6..7bd360b5bfb9 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6000,12 +6000,15 @@ static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) return EXIT_FASTPATH_REENTER_GUEST; - if (!vmx->req_immediate_exit) { - kvm_lapic_expired_hv_timer(vcpu); - return EXIT_FASTPATH_REENTER_GUEST; - } + /* + * If the timer expired because KVM used it to force an immediate exit, + * then mission accomplished. + */ + if (vmx->req_immediate_exit) + return EXIT_FASTPATH_EXIT_HANDLED; - return EXIT_FASTPATH_NONE; + kvm_lapic_expired_hv_timer(vcpu); + return EXIT_FASTPATH_REENTER_GUEST; } static int handle_preemption_timer(struct kvm_vcpu *vcpu) -- cgit From bf1a49436ea37b98dd2f37c57608951d0e28eecc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 17:27:03 -0800 Subject: KVM: x86: Move handling of is_guest_mode() into fastpath exit handlers Let the fastpath code decide which exits can/can't be handled in the fastpath when L2 is active, e.g. when KVM generates a VMX preemption timer exit to forcefully regain control, there is no "work" to be done and so such exits can be handled in the fastpath regardless of whether L1 or L2 is active. Moving the is_guest_mode() check into the fastpath code also makes it easier to see that L2 isn't allowed to use the fastpath in most cases, e.g. it's not immediately obvious why handle_fastpath_preemption_timer() is called from the fastpath and the normal path. 
Link: https://lore.kernel.org/r/20240110012705.506918-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 6 +++--- arch/x86/kvm/vmx/vmx.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index ab8ad8d0e818..fe4ac03aaa3a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4089,6 +4089,9 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { + if (is_guest_mode(vcpu)) + return EXIT_FASTPATH_NONE; + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && to_svm(vcpu)->vmcb->control.exit_info_1) return handle_fastpath_set_msr_irqoff(vcpu); @@ -4235,9 +4238,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, svm_complete_interrupts(vcpu); - if (is_guest_mode(vcpu)) - return EXIT_FASTPATH_NONE; - return svm_exit_handlers_fastpath(vcpu); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 7bd360b5bfb9..6f21d389bed5 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7214,6 +7214,9 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { + if (is_guest_mode(vcpu)) + return EXIT_FASTPATH_NONE; + switch (to_vmx(vcpu)->exit_reason.basic) { case EXIT_REASON_MSR_WRITE: return handle_fastpath_set_msr_irqoff(vcpu); @@ -7425,9 +7428,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); - if (is_guest_mode(vcpu)) - return EXIT_FASTPATH_NONE; - return vmx_exit_handlers_fastpath(vcpu); } -- cgit From 7b3d1bbf8d68d76fb21210932a5e8ed8ea80dbcc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 17:27:04 -0800 Subject: KVM: VMX: Handle KVM-induced preemption timer exits in fastpath for L2 Eat VMX treemption timer exits in the fastpath regardless of whether L1 or L2 is active. The VM-Exit is 100% KVM-induced, i.e. there is nothing directly related to the exit that KVM needs to do on behalf of the guest, thus there is no reason to wait until the slow path to do nothing. Opportunistically add comments explaining why preemption timer exits for emulating the guest's APIC timer need to go down the slow path. Link: https://lore.kernel.org/r/20240110012705.506918-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6f21d389bed5..1a3a4a92dc60 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6007,13 +6007,26 @@ static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) if (vmx->req_immediate_exit) return EXIT_FASTPATH_EXIT_HANDLED; + /* + * If L2 is active, go down the slow path as emulating the guest timer + * expiration likely requires synthesizing a nested VM-Exit. + */ + if (is_guest_mode(vcpu)) + return EXIT_FASTPATH_NONE; + kvm_lapic_expired_hv_timer(vcpu); return EXIT_FASTPATH_REENTER_GUEST; } static int handle_preemption_timer(struct kvm_vcpu *vcpu) { - handle_fastpath_preemption_timer(vcpu); + /* + * This non-fastpath handler is reached if and only if the preemption + * timer was being used to emulate a guest timer while L2 is active. + * All other scenarios are supposed to be handled in the fastpath. 
+ */ + WARN_ON_ONCE(!is_guest_mode(vcpu)); + kvm_lapic_expired_hv_timer(vcpu); return 1; } @@ -7214,7 +7227,12 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) + /* + * If L2 is active, some VMX preemption timer exits can be handled in + * the fastpath even, all other exits must use the slow path. + */ + if (is_guest_mode(vcpu) && + to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER) return EXIT_FASTPATH_NONE; switch (to_vmx(vcpu)->exit_reason.basic) { -- cgit From 0ec3d6d1f169baa7fc512ae4b78d17e7c94b7763 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 17:27:05 -0800 Subject: KVM: x86: Fully defer to vendor code to decide how to force immediate exit Now that vmx->req_immediate_exit is used only in the scope of vmx_vcpu_run(), use force_immediate_exit to detect that KVM should usurp the VMX preemption to force a VM-Exit and let vendor code fully handle forcing a VM-Exit. Opportunsitically drop __kvm_request_immediate_exit() and just have vendor code call smp_send_reschedule() directly. SVM already does this when injecting an event while also trying to single-step an IRET, i.e. it's not exactly secret knowledge that KVM uses a reschedule IPI to force an exit. Link: https://lore.kernel.org/r/20240110012705.506918-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm-x86-ops.h | 1 - arch/x86/include/asm/kvm_host.h | 3 --- arch/x86/kvm/svm/svm.c | 7 ++++--- arch/x86/kvm/vmx/vmx.c | 32 ++++++++++++++------------------ arch/x86/kvm/vmx/vmx.h | 2 -- arch/x86/kvm/x86.c | 10 +--------- 6 files changed, 19 insertions(+), 36 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 378ed944b849..3942b74c1b75 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier) KVM_X86_OP(get_exit_info) KVM_X86_OP(check_intercept) KVM_X86_OP(handle_exit_irqoff) -KVM_X86_OP(request_immediate_exit) KVM_X86_OP(sched_in) KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging) KVM_X86_OP_OPTIONAL(vcpu_blocking) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bdda3edddec9..198d30cceaf2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1732,8 +1732,6 @@ struct kvm_x86_ops { struct x86_exception *exception); void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu); - void (*request_immediate_exit)(struct kvm_vcpu *vcpu); - void (*sched_in)(struct kvm_vcpu *vcpu, int cpu); /* @@ -2239,7 +2237,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); -void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu); void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index fe4ac03aaa3a..b9096bb79c00 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4140,9 +4140,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, * is enough to force an immediate vmexit. 
*/ disable_nmi_singlestep(svm); - smp_send_reschedule(vcpu->cpu); + force_immediate_exit = true; } + if (force_immediate_exit) + smp_send_reschedule(vcpu->cpu); + pre_svm_run(vcpu); sync_lapic_to_cr8(vcpu); @@ -4995,8 +4998,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .check_intercept = svm_check_intercept, .handle_exit_irqoff = svm_handle_exit_irqoff, - .request_immediate_exit = __kvm_request_immediate_exit, - .sched_in = svm_sched_in, .nested_ops = &svm_nested_ops, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 1a3a4a92dc60..c9e117506d73 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -49,6 +49,8 @@ #include #include +#include + #include "capabilities.h" #include "cpuid.h" #include "hyperv.h" @@ -1281,8 +1283,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) u16 fs_sel, gs_sel; int i; - vmx->req_immediate_exit = false; - /* * Note that guest MSRs to be saved/restored can also be changed * when guest state is loaded. This happens when guest transitions @@ -5988,7 +5988,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) return 1; } -static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) +static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu, + bool force_immediate_exit) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -6004,7 +6005,7 @@ static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) * If the timer expired because KVM used it to force an immediate exit, * then mission accomplished. */ - if (vmx->req_immediate_exit) + if (force_immediate_exit) return EXIT_FASTPATH_EXIT_HANDLED; /* @@ -7166,13 +7167,13 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); } -static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) +static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl; u32 delta_tsc; - if (vmx->req_immediate_exit) { + if (force_immediate_exit) { vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); vmx->loaded_vmcs->hv_timer_soft_disabled = false; } else if (vmx->hv_deadline_tsc != -1) { @@ -7225,7 +7226,8 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, barrier_nospec(); } -static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) +static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu, + bool force_immediate_exit) { /* * If L2 is active, some VMX preemption timer exits can be handled in @@ -7239,7 +7241,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) case EXIT_REASON_MSR_WRITE: return handle_fastpath_set_msr_irqoff(vcpu); case EXIT_REASON_PREEMPTION_TIMER: - return handle_fastpath_preemption_timer(vcpu); + return handle_fastpath_preemption_timer(vcpu, force_immediate_exit); default: return EXIT_FASTPATH_NONE; } @@ -7382,7 +7384,9 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) vmx_passthrough_lbr_msrs(vcpu); if (enable_preemption_timer) - vmx_update_hv_timer(vcpu); + vmx_update_hv_timer(vcpu, force_immediate_exit); + else if (force_immediate_exit) + smp_send_reschedule(vcpu->cpu); kvm_wait_lapic_expire(vcpu); @@ -7446,7 +7450,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); - return vmx_exit_handlers_fastpath(vcpu); + return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit); } static void vmx_vcpu_free(struct kvm_vcpu *vcpu) @@ -7926,11 +7930,6 @@ static __init void 
vmx_set_cpu_caps(void) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); } -static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) -{ - to_vmx(vcpu)->req_immediate_exit = true; -} - static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, struct x86_instruction_info *info) { @@ -8383,8 +8382,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .check_intercept = vmx_check_intercept, .handle_exit_irqoff = vmx_handle_exit_irqoff, - .request_immediate_exit = vmx_request_immediate_exit, - .sched_in = vmx_sched_in, .cpu_dirty_log_size = PML_ENTITY_NUM, @@ -8644,7 +8641,6 @@ static __init int hardware_setup(void) if (!enable_preemption_timer) { vmx_x86_ops.set_hv_timer = NULL; vmx_x86_ops.cancel_hv_timer = NULL; - vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; } kvm_caps.supported_mce_cap |= MCG_LMCE_P; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index e3b0985bb74a..65786dbe7d60 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -332,8 +332,6 @@ struct vcpu_vmx { unsigned int ple_window; bool ple_window_dirty; - bool req_immediate_exit; - /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e92f14b9d0d6..d9724a35d8f3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10667,12 +10667,6 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); } -void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) -{ - smp_send_reschedule(vcpu->cpu); -} -EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); - /* * Called within kvm->srcu read side. * Returns 1 to let vcpu_run() continue the guest execution loop without @@ -10922,10 +10916,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto cancel_injection; } - if (req_immediate_exit) { + if (req_immediate_exit) kvm_make_request(KVM_REQ_EVENT, vcpu); - static_call(kvm_x86_request_immediate_exit)(vcpu); - } fpregs_assert_state_consistent(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) -- cgit From a78d9046696b88079a5696bccec4e4e439a3f2a2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 9 Feb 2024 14:20:46 -0800 Subject: KVM: x86: Move "KVM no-APIC vCPU" key management into local APIC code Move incrementing and decrementing of kvm_has_noapic_vcpu into kvm_create_lapic() and kvm_free_lapic() respectively to fix a benign bug where KVM fails to decrement the count if vCPU creation ultimately fails, e.g. due to a memory allocation failing. Note, the bug is benign as kvm_has_noapic_vcpu is used purely to optimize lapic_in_kernel() checks, and that optimization is quite dubious. That, and practically speaking no setup that cares at all about performance runs with a userspace local APIC. 
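[Editor's sketch] The underlying pattern is to pair the static-key increment and decrement inside the same create/free helpers, so that any error path after creation can unwind by calling the free helper without leaking a key reference. A generic illustration of that pattern follows (illustrative only, not KVM's code; the "widget"/"owner" names are made up):

#include <linux/jump_label.h>
#include <linux/slab.h>

static DEFINE_STATIC_KEY_FALSE(has_widgetless_owner);

struct widget {
	int dummy;
};

struct owner {
	struct widget *w;
};

static int owner_create_widget(struct owner *o, bool want_widget)
{
	if (!want_widget) {
		/* Counted here, undone only in owner_free_widget(). */
		static_branch_inc(&has_widgetless_owner);
		return 0;
	}

	o->w = kzalloc(sizeof(*o->w), GFP_KERNEL);
	return o->w ? 0 : -ENOMEM;
}

static void owner_free_widget(struct owner *o)
{
	if (!o->w) {
		static_branch_dec(&has_widgetless_owner);
		return;
	}

	kfree(o->w);
	o->w = NULL;
}

A caller whose setup fails after owner_create_widget() can simply call owner_free_widget() on its unwind path and the key count stays balanced, which is the property the changelog above is after.
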
Reported-by: Li RongQing Cc: Maxim Levitsky Reviewed-by: Xu Yilun Link: https://lore.kernel.org/r/20240209222047.394389-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/lapic.c | 27 ++++++++++++++++++++++++++- arch/x86/kvm/x86.c | 29 +++-------------------------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3242f3da2457..681f6d82d015 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -124,6 +124,9 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap) return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } +__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); +EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); + __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ); __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ); @@ -2466,8 +2469,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!vcpu->arch.apic) + if (!vcpu->arch.apic) { + static_branch_dec(&kvm_has_noapic_vcpu); return; + } hrtimer_cancel(&apic->lapic_timer.timer); @@ -2809,6 +2814,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) ASSERT(vcpu != NULL); + if (!irqchip_in_kernel(vcpu->kvm)) { + static_branch_inc(&kvm_has_noapic_vcpu); + return 0; + } + apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT); if (!apic) goto nomem; @@ -2844,6 +2854,21 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */ kvm_iodevice_init(&apic->dev, &apic_mmio_ops); + /* + * Defer evaluating inhibits until the vCPU is first run, as this vCPU + * will not get notified of any changes until this vCPU is visible to + * other vCPUs (marked online and added to the set of vCPUs). + * + * Opportunistically mark APICv active as VMX in particularly is highly + * unlikely to have inhibits. Ignore the current per-VM APICv state so + * that vCPU creation is guaranteed to run with a deterministic value, + * the request will ensure the vCPU gets the correct state before VM-Entry. + */ + if (enable_apicv) { + apic->apicv_active = true; + kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); + } + return 0; nomem_free_apic: kfree(apic); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d9724a35d8f3..19b18679779c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12046,27 +12046,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (r < 0) return r; - if (irqchip_in_kernel(vcpu->kvm)) { - r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); - if (r < 0) - goto fail_mmu_destroy; - - /* - * Defer evaluating inhibits until the vCPU is first run, as - * this vCPU will not get notified of any changes until this - * vCPU is visible to other vCPUs (marked online and added to - * the set of vCPUs). Opportunistically mark APICv active as - * VMX in particularly is highly unlikely to have inhibits. - * Ignore the current per-VM APICv state so that vCPU creation - * is guaranteed to run with a deterministic value, the request - * will ensure the vCPU gets the correct state before VM-Entry. 
- */ - if (enable_apicv) { - vcpu->arch.apic->apicv_active = true; - kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); - } - } else - static_branch_inc(&kvm_has_noapic_vcpu); + r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); + if (r < 0) + goto fail_mmu_destroy; r = -ENOMEM; @@ -12187,8 +12169,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); kvfree(vcpu->arch.cpuid_entries); - if (!lapic_in_kernel(vcpu)) - static_branch_dec(&kvm_has_noapic_vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) @@ -12465,9 +12445,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; } -__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); -EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); - void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); -- cgit From fc3c94142b3a4391cab94adde56fcfbea25723e5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 9 Feb 2024 14:20:47 -0800 Subject: KVM: x86: Sanity check that kvm_has_noapic_vcpu is zero at module_exit() WARN if kvm.ko is unloaded with an elevated kvm_has_noapic_vcpu to guard against incorrect management of the key, e.g. to detect if KVM fails to decrement the key in error paths. Because kvm_has_noapic_vcpu is purely an optimization, in all likelihood KVM could completely botch handling of kvm_has_noapic_vcpu and no one would notice (which is a good argument for deleting the key entirely, but that's a problem for another day). Note, ideally the sanity check would be performance when kvm_usage_count goes to zero, but adding an arch callback just for this sanity check isn't at all worth doing. Link: https://lore.kernel.org/r/20240209222047.394389-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 19b18679779c..019320580a84 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13871,9 +13871,6 @@ module_init(kvm_x86_init); static void __exit kvm_x86_exit(void) { - /* - * If module_init() is implemented, module_exit() must also be - * implemented to allow module unload. - */ + WARN_ON_ONCE(static_branch_unlikely(&kvm_has_noapic_vcpu)); } module_exit(kvm_x86_exit); -- cgit From 77bcd9e6231a5297ef417a7d7f734d61c2bcceb6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 16:39:35 -0800 Subject: KVM: Add dedicated arch hook for querying if vCPU was preempted in-kernel Plumb in a dedicated hook for querying whether or not a vCPU was preempted in-kernel. Unlike literally every other architecture, x86's VMX can check if a vCPU is in kernel context if and only if the vCPU is loaded on the current pCPU. x86's kvm_arch_vcpu_in_kernel() works around the limitation by querying kvm_get_running_vcpu() and redirecting to vcpu->arch.preempted_in_kernel as needed. But that's unnecessary, confusing, and fragile, e.g. x86 has had at least one bug where KVM incorrectly used a stale preempted_in_kernel. No functional change intended. 
Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20240110003938.490206-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 5 +++++ include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 14 +++++++++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 019320580a84..94346490f407 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13054,6 +13054,11 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) return false; } +bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) +{ + return kvm_arch_vcpu_in_kernel(vcpu); +} + bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) { if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7e7fd25b09b3..28b020404a41 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1505,6 +1505,7 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); +bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); int kvm_arch_create_vm_debugfs(struct kvm *kvm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 10bfc88a69f7..9b92858c8b72 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4042,6 +4042,18 @@ static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) return false; } +/* + * By default, simply query the target vCPU's current mode when checking if a + * vCPU was preempted in kernel mode. All architectures except x86 (or more + * specifical, except VMX) allow querying whether or not a vCPU is in kernel + * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel() + * directly for cross-vCPU checks is functionally correct and accurate. + */ +bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) +{ + return kvm_arch_vcpu_in_kernel(vcpu); +} + bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) { return false; @@ -4080,7 +4092,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && !kvm_arch_dy_has_pending_interrupt(vcpu) && - !kvm_arch_vcpu_in_kernel(vcpu)) + !kvm_arch_vcpu_preempted_in_kernel(vcpu)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; -- cgit From 9b8615c5d37fca15b330882bafceaf24f2398352 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 16:39:36 -0800 Subject: KVM: x86: Rely solely on preempted_in_kernel flag for directed yield Snapshot preempted_in_kernel using kvm_arch_vcpu_in_kernel() so that the flag is "accurate" (or rather, consistent and deterministic within KVM) for guests with protected state, and explicitly use preempted_in_kernel when checking if a vCPU was preempted in kernel mode instead of bouncing through kvm_arch_vcpu_in_kernel(). Drop the gnarly logic in kvm_arch_vcpu_in_kernel() that redirects to preempted_in_kernel if the target vCPU is not the "running", i.e. loaded, vCPU, as the only reason that code existed was for the directed yield case where KVM wants to check the CPL of a vCPU that may or may not be loaded on the current pCPU. 
Cc: Like Xu Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20240110003938.490206-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 94346490f407..ab7d2baaae7a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5059,8 +5059,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) int idx; if (vcpu->preempted) { - if (!vcpu->arch.guest_state_protected) - vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); + vcpu->arch.preempted_in_kernel = kvm_arch_vcpu_in_kernel(vcpu); /* * Take the srcu lock as memslots will be accessed to check the gfn @@ -13056,7 +13055,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) { - return kvm_arch_vcpu_in_kernel(vcpu); + return vcpu->arch.preempted_in_kernel; } bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) @@ -13079,9 +13078,6 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) if (vcpu->arch.guest_state_protected) return true; - if (vcpu != kvm_get_running_vcpu()) - return vcpu->arch.preempted_in_kernel; - return static_call(kvm_x86_get_cpl)(vcpu) == 0; } -- cgit From 322d79f1db4b033844dcb2de43abd570abbd04b4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 16:39:37 -0800 Subject: KVM: x86: Clean up directed yield API for "has pending interrupt" Directly return the boolean result of whether or not a vCPU has a pending interrupt instead of effectively doing: if (true) return true; return false; Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20240110003938.490206-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ab7d2baaae7a..e6283628fd53 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13046,11 +13046,8 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) { - if (kvm_vcpu_apicv_active(vcpu) && - static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) - return true; - - return false; + return kvm_vcpu_apicv_active(vcpu) && + static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu); } bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) -- cgit From dafc17dd529a6194e199b837916062090562ff80 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 16:39:38 -0800 Subject: KVM: Add a comment explaining the directed yield pending interrupt logic Add a comment to explain why KVM treats vCPUs with pending interrupts as in-kernel when a vCPU wants to yield to a vCPU that was preempted while running in kernel mode. Link: https://lore.kernel.org/r/20240110003938.490206-5-seanjc@google.com Signed-off-by: Sean Christopherson --- virt/kvm/kvm_main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9b92858c8b72..c0c2fcdabe1b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4090,6 +4090,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) continue; + + /* + * Treat the target vCPU as being in-kernel if it has a + * pending interrupt, as the vCPU trying to yield may + * be spinning waiting on IPI delivery, i.e. the target + * vCPU is in-kernel for the purposes of directed yield. 
+ */ if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && !kvm_arch_dy_has_pending_interrupt(vcpu) && !kvm_arch_vcpu_preempted_in_kernel(vcpu)) -- cgit From 78ccfce774435a08d9c69ce434099166cc7952c8 Mon Sep 17 00:00:00 2001 From: John Allen Date: Tue, 27 Feb 2024 20:03:56 +0000 Subject: KVM: SVM: Rename vmplX_ssp -> plX_ssp The SSP fields in the SEV-ES save area were mistakenly named vmplX_ssp instead of plX_ssp. Rename these to the correct names as defined in the APM. Fixes: 6d3b3d34e39e ("KVM: SVM: Update the SEV-ES save area mapping") Signed-off-by: John Allen Link: https://lore.kernel.org/r/20240227200356.35114-1-john.allen@amd.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/svm.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 87a7b917d30e..728c98175b9c 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -358,10 +358,10 @@ struct sev_es_save_area { struct vmcb_seg ldtr; struct vmcb_seg idtr; struct vmcb_seg tr; - u64 vmpl0_ssp; - u64 vmpl1_ssp; - u64 vmpl2_ssp; - u64 vmpl3_ssp; + u64 pl0_ssp; + u64 pl1_ssp; + u64 pl2_ssp; + u64 pl3_ssp; u64 u_cet; u8 reserved_0xc8[2]; u8 vmpl; -- cgit