author     Linus Torvalds <torvalds@linux-foundation.org>  2020-09-13 08:34:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-09-13 08:34:47 -0700
commit     84b1349972129918557b7593c37ae52855bdc2e8 (patch)
tree       218547e7a9fc11bd2e9b18ead8f1c2b24e60a2c1 /arch
parent     b952e974300be3184049a125e7cba112fe659017 (diff)
parent     37f66bbef0920429b8cb5eddba849ec4308a9f8e (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "A bit on the bigger side, mostly due to me being on vacation, then
  busy, then on parental leave, but there's nothing worrisome.

  ARM:
   - Multiple stolen time fixes, with a new capability to match x86
   - Fix for hugetlbfs mappings when PUD and PMD are the same level
   - Fix for hugetlbfs mappings when PTE mappings are enforced (dirty
     logging, for example)
   - Fix tracing output of 64bit values

  x86:
   - nSVM state restore fixes
   - Async page fault fixes
   - Lots of small fixes everywhere"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (25 commits)
  KVM: emulator: more strict rsm checks.
  KVM: nSVM: more strict SMM checks when returning to nested guest
  SVM: nSVM: setup nested msr permission bitmap on nested state load
  SVM: nSVM: correctly restore GIF on vmexit from nesting after migration
  x86/kvm: don't forget to ACK async PF IRQ
  x86/kvm: properly use DEFINE_IDTENTRY_SYSVEC() macro
  KVM: VMX: Don't freeze guest when event delivery causes an APIC-access exit
  KVM: SVM: avoid emulation with stale next_rip
  KVM: x86: always allow writing '0' to MSR_KVM_ASYNC_PF_EN
  KVM: SVM: Periodically schedule when unregistering regions on destroy
  KVM: MIPS: Change the definition of kvm type
  kvm x86/mmu: use KVM_REQ_MMU_SYNC to sync when needed
  KVM: nVMX: Fix the update value of nested load IA32_PERF_GLOBAL_CTRL control
  KVM: fix memory leak in kvm_io_bus_unregister_dev()
  KVM: Check the allocation of pv cpu mask
  KVM: nVMX: Update VMCS02 when L2 PAE PDPTE updates detected
  KVM: arm64: Update page shift if stage 2 block mapping not supported
  KVM: arm64: Fix address truncation in traces
  KVM: arm64: Do not try to map PUDs when they are folded into PMD
  arm64/x86: KVM: Introduce steal-time cap
  ...
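As context for the new KVM_CAP_STEAL_TIME capability wired up below (see the arm.c and x86.c hunks), a minimal userspace sketch of probing it through the standard KVM_CHECK_EXTENSION ioctl; it assumes kernel headers new enough to define KVM_CAP_STEAL_TIME:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* Non-zero means stolen-time reporting is supported on this host. */
	int cap = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME);
	printf("KVM_CAP_STEAL_TIME: %d\n", cap);
	return 0;
}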
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h    2
-rw-r--r--  arch/arm64/kvm/arm.c                 3
-rw-r--r--  arch/arm64/kvm/mmu.c                 8
-rw-r--r--  arch/arm64/kvm/pvtime.c             29
-rw-r--r--  arch/arm64/kvm/trace_arm.h          16
-rw-r--r--  arch/arm64/kvm/trace_handle_exit.h   6
-rw-r--r--  arch/mips/kvm/mips.c                 2
-rw-r--r--  arch/x86/kernel/kvm.c               26
-rw-r--r--  arch/x86/kvm/emulate.c              22
-rw-r--r--  arch/x86/kvm/mmu/mmu.c               2
-rw-r--r--  arch/x86/kvm/svm/nested.c            7
-rw-r--r--  arch/x86/kvm/svm/sev.c               1
-rw-r--r--  arch/x86/kvm/svm/svm.c              36
-rw-r--r--  arch/x86/kvm/vmx/nested.c           10
-rw-r--r--  arch/x86/kvm/vmx/vmx.c               5
-rw-r--r--  arch/x86/kvm/vmx/vmx.h               1
-rw-r--r--  arch/x86/kvm/x86.c                   5
17 files changed, 120 insertions, 61 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e52c927aade5..905c2b87e05a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
/* Guest PV state */
struct {
- u64 steal;
u64 last_steal;
gpa_t base;
} steal;
@@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 46dc3d75cf13..b588c3b5c2f0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = kvm_arm_pvtime_supported();
+ break;
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ba00bcc0c884..9a636b8064f1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1877,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
vma_pagesize = PAGE_SIZE;
+ vma_shift = PAGE_SHIFT;
}
/*
@@ -1970,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM &&
stage2_is_exec(mmu, fault_ipa, vma_pagesize));
- if (vma_pagesize == PUD_SIZE) {
+ /*
+ * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
+ * all we have is a 2-level page table. Trying to map a PUD in
+ * this case would be fatally wrong.
+ */
+ if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
new_pud = kvm_pud_mkhuge(new_pud);
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index f7b52ce1557e..920ac43077ad 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,25 +13,22 @@
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- u64 steal;
- __le64 steal_le;
- u64 offset;
- int idx;
u64 base = vcpu->arch.steal.base;
+ u64 last_steal = vcpu->arch.steal.last_steal;
+ u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ u64 steal = 0;
+ int idx;
if (base == GPA_INVALID)
return;
- /* Let's do the local bookkeeping */
- steal = vcpu->arch.steal.steal;
- steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
- vcpu->arch.steal.last_steal = current->sched_info.run_delay;
- vcpu->arch.steal.steal = steal;
-
- steal_le = cpu_to_le64(steal);
idx = srcu_read_lock(&kvm->srcu);
- offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
- kvm_put_guest(kvm, base + offset, steal_le, u64);
+ if (!kvm_get_guest(kvm, base + offset, steal)) {
+ steal = le64_to_cpu(steal);
+ vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+ steal += vcpu->arch.steal.last_steal - last_steal;
+ kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+ }
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
- val = SMCCC_RET_SUCCESS;
+ if (vcpu->arch.steal.base != GPA_INVALID)
+ val = SMCCC_RET_SUCCESS;
break;
}
@@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
* Start counting stolen time from the time the guest requests
* the feature enabled.
*/
- vcpu->arch.steal.steal = 0;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
idx = srcu_read_lock(&kvm->srcu);
@@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
-static bool kvm_arm_pvtime_supported(void)
+bool kvm_arm_pvtime_supported(void)
{
return !!sched_info_on();
}
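The pvtime.c change above drops the host-side steal accumulator and treats the guest-visible stolen_time field as the single running total. A simplified, compilable sketch of that read-modify-write scheme (illustrative only; the le64 conversions, SRCU locking, and guest-memory accessors of the real code are omitted):

#include <stdint.h>

/* Illustrative only: the guest-visible stolen_time value is the running
 * total; the host adds the run_delay delta since the last update and
 * remembers the new baseline. */
uint64_t update_stolen_time(uint64_t guest_total, uint64_t *last_steal,
			    uint64_t run_delay_now)
{
	guest_total += run_delay_now - *last_steal; /* time stolen since last update */
	*last_steal = run_delay_now;                /* baseline for the next update */
	return guest_total;                         /* written back to the shared structure */
}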
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4691053c5ee4..ff0444352bba 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
__print_symbolic(__entry->ret, kvm_arm_exception_type),
__entry->esr_ec,
__print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+ TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
@@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->cpsr = cpsr;
),
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+ TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
__entry->end = end;
),
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_age_hva,
@@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
__entry->end = end;
),
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_set_way_flush,
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..8d78acc4fba7 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
__entry->is_wfe = is_wfe;
),
- TP_printk("guest executed wf%c at: 0x%08lx",
+ TP_printk("guest executed wf%c at: 0x%016lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->imm = imm;
),
- TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
@@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
__entry->write_value = write_value;
),
- TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+ TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
);
TRACE_EVENT(kvm_handle_sys_reg,
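The %08lx -> %016lx changes above only widen the zero-padding so 64-bit PCs and addresses print at a consistent full width; a trivial userspace comparison using a hypothetical address value:

#include <stdio.h>

int main(void)
{
	unsigned long pc = 0x10001234UL; /* hypothetical address with only low bits set */
	printf("old: 0x%08lx\n", pc);    /* pads to 8 hex digits:  0x10001234 */
	printf("new: 0x%016lx\n", pc);   /* pads to 16 hex digits: 0x0000000010001234 */
	return 0;
}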
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7de85d2253ff..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08320b0b2b27..1b51b727b140 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -270,9 +270,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
u32 token;
- irqentry_state_t state;
- state = irqentry_enter(regs);
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
@@ -283,7 +282,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
}
- irqentry_exit(regs, state);
set_irq_regs(old_regs);
}
@@ -654,7 +652,6 @@ static void __init kvm_guest_init(void)
}
if (pv_tlb_flush_supported()) {
- pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
pr_info("KVM setup pv remote TLB flush\n");
}
@@ -767,6 +764,14 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
+static void kvm_free_pv_cpu_mask(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
static __init int kvm_alloc_cpumask(void)
{
int cpu;
@@ -785,11 +790,20 @@ static __init int kvm_alloc_cpumask(void)
if (alloc)
for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
- GFP_KERNEL, cpu_to_node(cpu));
+ if (!zalloc_cpumask_var_node(
+ per_cpu_ptr(&__pv_cpu_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu))) {
+ goto zalloc_cpumask_fail;
+ }
}
+ apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+ pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
return 0;
+
+zalloc_cpumask_fail:
+ kvm_free_pv_cpu_mask();
+ return -ENOMEM;
}
arch_initcall(kvm_alloc_cpumask);
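The kvm.c hunk above switches to an all-or-nothing scheme: the PV IPI and remote TLB-flush hooks are installed only after every per-CPU mask has been allocated, and everything is freed on failure. A generic userspace sketch of that allocate-or-roll-back pattern (not the kernel code itself):

#include <stdlib.h>

/* Allocate one buffer per CPU; on any failure free what was already
 * allocated and report the error, so callers never enable a feature
 * that depends on partially allocated state. */
int alloc_per_cpu(void **bufs, int ncpus, size_t size)
{
	for (int i = 0; i < ncpus; i++) {
		bufs[i] = calloc(1, size);
		if (!bufs[i]) {
			while (i--)
				free(bufs[i]);
			return -1;
		}
	}
	return 0; /* only now is it safe to install the feature hooks */
}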
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5299ef5ff18d..2f6510de6b0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7fc8);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
@@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7f60);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
- ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
+
+ if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 43fdb0c12a5d..71aa3da2a0b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2469,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
}
if (sp->unsync_children)
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fb68467e6049..e90bc436f584 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/* Give the current vmcb to the guest */
- svm_set_gif(svm, false);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
@@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(&vmcb->control, &hsave->control);
+ /* On vmexit the GIF is set to false */
+ svm_set_gif(svm, false);
+
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset;
@@ -1132,6 +1134,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
load_nested_vmcb_control(svm, &ctl);
nested_prepare_vmcb_control(svm);
+ if (!nested_svm_vmrun_msrpm(svm))
+ return -EINVAL;
+
out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402dc4234e39..7bf7bf734979 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1106,6 +1106,7 @@ void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
+ cond_resched();
}
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0194336b64a4..c44f3e9140d5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2938,8 +2938,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- svm_complete_interrupts(svm);
-
if (is_guest_mode(vcpu)) {
int vmexit;
@@ -3504,7 +3502,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
/* Any pending NMI will happen here */
- exit_fastpath = svm_exit_handlers_fastpath(vcpu);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@@ -3518,6 +3515,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ vmcb_mark_all_clean(svm->vmcb);
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@@ -3537,7 +3535,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
- vmcb_mark_all_clean(svm->vmcb);
+ svm_complete_interrupts(svm);
+ exit_fastpath = svm_exit_handlers_fastpath(vcpu);
return exit_fastpath;
}
@@ -3900,21 +3899,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *nested_vmcb;
struct kvm_host_map map;
- u64 guest;
- u64 vmcb;
int ret = 0;
- guest = GET_SMSTATE(u64, smstate, 0x7ed8);
- vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
+ if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+ u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+ u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
- if (guest) {
- if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
- return 1;
- nested_vmcb = map.hva;
- ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ if (guest) {
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ return 1;
+
+ if (!(saved_efer & EFER_SVME))
+ return 1;
+
+ if (kvm_vcpu_map(&svm->vcpu,
+ gpa_to_gfn(vmcb), &map) == -EINVAL)
+ return 1;
+
+ ret = enter_svm_guest_mode(svm, vmcb, map.hva);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ }
}
return ret;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..1bb6b31eb646 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
+ /*
+ * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
+ * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
+ * up-to-date before switching to L1.
+ */
+ if (enable_ept && is_pae_paging(vcpu))
+ vmx_ept_load_pdptrs(vcpu);
+
leave_guest_mode(vcpu);
if (nested_cpu_has_preemption_timer(vmcs12))
@@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vmx->nested.msrs.entry_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high &=
- ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
}
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 819c185adf09..8646a797b7a8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2971,7 +2971,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
vpid_sync_context(to_vmx(vcpu)->vpid);
}
-static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3114,7 +3114,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
- ept_load_pdptrs(vcpu);
+ vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = pgd;
}
@@ -6054,6 +6054,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..a2f82127c170 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d39d6cf1d473..1994602a0851 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2731,7 +2731,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
if (!lapic_in_kernel(vcpu))
- return 1;
+ return data ? 1 : 0;
vcpu->arch.apf.msr_en_val = data;
@@ -3578,6 +3578,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SMALLER_MAXPHYADDR:
r = (int) allow_smaller_maxphyaddr;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = sched_info_on();
+ break;
default:
break;
}