From 8fa76162487143d202db20ce84e12061b671a058 Mon Sep 17 00:00:00 2001
From: Wei Huang
Date: Fri, 29 Mar 2019 15:12:53 -0500
Subject: KVM: arm/arm64: arch_timer: Fix CNTP_TVAL calculation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Recently the generic timer test of kvm-unit-tests failed to complete
(stalled) when a physical timer is being used. This issue is caused by
an incorrect update of CNTP_CVAL when CNTP_TVAL is accessed, introduced
by commit 84135d3d18da ("KVM: arm/arm64: consolidate arch timer trap
handlers").

According to the Arm ARM, the read/write behaviour of accesses to the
TVAL registers is expected to be:

 * READ:  TimerValue   = CompareValue - (Counter - Offset)
 * WRITE: CompareValue = (Counter - Offset) + SignExtend(TimerValue)

This patch fixes the TVAL read/write code path according to the
specification.

Fixes: 84135d3d18da ("KVM: arm/arm64: consolidate arch timer trap handlers")
Signed-off-by: Wei Huang
[maz: commit message tidy-up]
Signed-off-by: Marc Zyngier
---
 virt/kvm/arm/arch_timer.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 3417f2dbc366..d43308dc3617 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -812,7 +812,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 
 	switch (treg) {
 	case TIMER_REG_TVAL:
-		val = kvm_phys_timer_read() - timer->cntvoff - timer->cnt_cval;
+		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
 		break;
 
 	case TIMER_REG_CTL:
@@ -858,7 +858,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
 	switch (treg) {
 	case TIMER_REG_TVAL:
-		timer->cnt_cval = val - kvm_phys_timer_read() - timer->cntvoff;
+		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
 		break;
 
 	case TIMER_REG_CTL:
-- 
cgit
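As a standalone illustration of the TVAL arithmetic quoted in the commit
message above, here is a minimal C sketch. The helper names and the flat
64-bit model of the counter are illustrative only, not the kernel's code:

#include <stdint.h>

/*
 * Toy model of the Arm ARM behaviour for TVAL accesses: 'counter' is the
 * current count, 'offset' is CNTVOFF (zero for the physical timer), and
 * 'cval' is the compare value backing the timer.
 */
static uint64_t tval_read(uint64_t cval, uint64_t counter, uint64_t offset)
{
	/* READ: TimerValue = CompareValue - (Counter - Offset) */
	return cval - (counter - offset);
}

static uint64_t tval_write(int32_t tval, uint64_t counter, uint64_t offset)
{
	/* WRITE: CompareValue = (Counter - Offset) + SignExtend(TimerValue) */
	return (counter - offset) + (int64_t)tval;
}

The pre-fix read path computed (Counter - Offset) - CompareValue, i.e. the
negation of the architected value, which is what made the kvm-unit-tests
generic timer test stall.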
From 96085b949672dca19773495813b577eb3bedf06e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 2 Apr 2019 06:36:23 +0100
Subject: KVM: arm/arm64: vgic-v3: Retire pending interrupts on disabling LPIs

When disabling LPIs (for example on reset) at the redistributor level,
it is expected that LPIs that were pending in the CPU interface are
eventually retired. Currently, this is not what is happening: these LPIs
will stay in the ap_list, eventually being acknowledged by the vcpu
(which didn't quite expect this behaviour).

The fix is thus to retire these LPIs from the list of pending interrupts
as we disable LPIs.

Reported-by: Heyi Guo
Tested-by: Heyi Guo
Fixes: 0e4e82f154e3 ("KVM: arm64: vgic-its: Enable ITS emulation as a virtual MSI controller")
Signed-off-by: Marc Zyngier
---
 virt/kvm/arm/vgic/vgic-mmio-v3.c |  3 +++
 virt/kvm/arm/vgic/vgic.c         | 21 +++++++++++++++++++++
 virt/kvm/arm/vgic/vgic.h         |  1 +
 3 files changed, 25 insertions(+)

(limited to 'virt')

diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 4a12322bf7df..9f4843fe9cda 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -200,6 +200,9 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
 
 	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
 
+	if (was_enabled && !vgic_cpu->lpis_enabled)
+		vgic_flush_pending_lpis(vcpu);
+
 	if (!was_enabled && vgic_cpu->lpis_enabled)
 		vgic_enable_lpis(vcpu);
 }
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 3af69f2a3866..191deccf60bf 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -151,6 +151,27 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	kfree(irq);
 }
 
+void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq, *tmp;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+
+	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+		if (irq->intid >= VGIC_MIN_LPI) {
+			raw_spin_lock(&irq->irq_lock);
+			list_del(&irq->ap_list);
+			irq->vcpu = NULL;
+			raw_spin_unlock(&irq->irq_lock);
+			vgic_put_irq(vcpu->kvm, irq);
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+}
+
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
 {
 	WARN_ON(irq_set_irqchip_state(irq->host_irq,
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index a90024718ca4..abeeffabc456 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -238,6 +238,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
+void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
 int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
-- 
cgit
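To make the enable/disable handling above easier to follow in isolation,
here is a self-contained toy model of the CTLR.EnableLPIs edge logic. The
struct and function names are invented for illustration and do not exist
in the kernel:

#include <stdbool.h>
#include <stdio.h>

struct redist_model {
	bool lpis_enabled;
};

static void flush_pending_lpis(void) { puts("falling edge: retire pending LPIs"); }
static void enable_lpis(void)        { puts("rising edge: set up LPI delivery"); }

/* Mirrors the transition handling in vgic_mmio_write_v3r_ctlr(). */
static void write_ctlr_enable_lpis(struct redist_model *r, bool enable_bit)
{
	bool was_enabled = r->lpis_enabled;

	r->lpis_enabled = enable_bit;

	if (was_enabled && !r->lpis_enabled)	/* 1 -> 0: the case this patch adds */
		flush_pending_lpis();

	if (!was_enabled && r->lpis_enabled)	/* 0 -> 1: already handled */
		enable_lpis();
}

int main(void)
{
	struct redist_model r = { .lpis_enabled = false };

	write_ctlr_enable_lpis(&r, true);	/* guest enables LPIs */
	write_ctlr_enable_lpis(&r, false);	/* reset path disables them again */
	return 0;
}

Note that the real vgic_flush_pending_lpis() also drops, via vgic_put_irq(),
the reference that was taken when each LPI was queued on the ap_list.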
From 811328fc3222f7b55846de0cd0404339e2e1e6d7 Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Thu, 4 Apr 2019 19:42:30 +0200
Subject: KVM: arm/arm64: Ensure vcpu target is unset on reset failure

A failed KVM_ARM_VCPU_INIT should not set the vcpu target, as the vcpu
target is used by kvm_vcpu_initialized() to determine if other vcpu
ioctls may proceed. We need to set the target before calling
kvm_reset_vcpu(), but if that call fails, we should then unset it and
clear the feature bitmap while we're at it.

Signed-off-by: Andrew Jones
[maz: Simplified patch, completed commit message]
Signed-off-by: Marc Zyngier
---
 virt/kvm/arm/arm.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 99c37384ba7b..f412ebc90610 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -934,7 +934,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			       const struct kvm_vcpu_init *init)
 {
-	unsigned int i;
+	unsigned int i, ret;
 	int phys_target = kvm_target_cpu();
 
 	if (init->target != phys_target)
@@ -969,9 +969,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 	vcpu->arch.target = phys_target;
 
 	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
+	ret = kvm_reset_vcpu(vcpu);
+	if (ret) {
+		vcpu->arch.target = -1;
+		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+	}
 
+	return ret;
+}
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
 {
-- 
cgit

From 2e8010bb71b39ff18aac9fb209b3c3093f4c4783 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose
Date: Wed, 10 Apr 2019 16:14:57 +0100
Subject: kvm: arm: Skip stage2 huge mappings for unaligned ipa backed by THP

With commit a80868f398554842b14, we no longer ensure that the THP page
is properly aligned in the guest IPA. Skip the stage2 huge mapping for
an unaligned IPA backed by transparent hugepages.

Fixes: a80868f398554842b14 ("KVM: arm/arm64: Enforce PTE mappings at stage2 when needed")
Reported-by: Eric Auger
Cc: Marc Zyngier
Cc: Christoffer Dall
Cc: Zenghui Yu
Cc: Zheng Xiang
Cc: Andrew Murray
Cc: Eric Auger
Signed-off-by: Suzuki K Poulose
Signed-off-by: Marc Zyngier
---
 virt/kvm/arm/mmu.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'virt')

diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 27c958306449..a39dcfdbcc65 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1781,8 +1781,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		 * Only PMD_SIZE transparent hugepages(THP) are
 		 * currently supported. This code will need to be
 		 * updated to support other THP sizes.
+		 *
+		 * Make sure the host VA and the guest IPA are sufficiently
+		 * aligned and that the block is contained within the memslot.
 		 */
-		if (transparent_hugepage_adjust(&pfn, &fault_ipa))
+		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+		    transparent_hugepage_adjust(&pfn, &fault_ipa))
 			vma_pagesize = PMD_SIZE;
 	}
 
-- 
cgit
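"Sufficiently aligned" in the hunk above means that the host VA and the
guest IPA share the same offset within a PMD_SIZE block. A simplified,
self-contained model of that condition (the function name is invented,
and the memslot containment part of fault_supports_stage2_huge_mapping()
is deliberately omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)	/* 2MiB blocks, as on a 4K-page host */

/* Can a stage2 block mapping cover this fault without shifting the IPA? */
static bool same_offset_in_block(uint64_t hva, uint64_t ipa)
{
	return (hva & (PMD_SIZE - 1)) == (ipa & (PMD_SIZE - 1));
}

int main(void)
{
	/* Hypothetical addresses: the IPA is offset by 1MiB from the host VA. */
	printf("%d\n", same_offset_in_block(0x40200000, 0x40300000));	/* 0: fall back to PTEs */
	printf("%d\n", same_offset_in_block(0x40200000, 0x80200000));	/* 1: block mapping is possible */
	return 0;
}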
From 6bc210003dff7b789efae5bb02a0320dc24dd416 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Thu, 25 Apr 2019 13:57:40 +0100
Subject: KVM: arm/arm64: Don't emulate virtual timers on userspace ioctls

When a VCPU never runs before a guest exists, but we set timer registers
up via ioctls, the associated hrtimer might never get cancelled.

Since we moved vcpu_load/put into the arch-specific implementations and
only have load/put for KVM_RUN, we won't ever have a scheduled hrtimer
for emulating a timer when modifying the timer state via an ioctl from
user space. All we need to do is make sure that we pick up the right
state when we load the timer state next time userspace calls KVM_RUN
again.

We also do not need to worry about this interacting with the bg_timer,
because if we were in WFI from the guest, and somehow ended up in a
kvm_arm_timer_set_reg, it means that:

 1. the VCPU thread has received a signal,
 2. we have called vcpu_load when being scheduled in again,
 3. we have called vcpu_put when we returned to userspace for it to issue
    another ioctl

And therefore will not have a bg_timer programmed and the event is
treated as a spurious wakeup from WFI if userspace decides to run the
vcpu again, even if there are no virtual interrupts.

This fixes stray virtual timer interrupts triggered by an expiring
hrtimer, which happens after a failed live migration, for instance.

Fixes: bee038a674875 ("KVM: arm/arm64: Rework the timer code to use a timer_map")
Signed-off-by: Christoffer Dall
Reported-by: Andre Przywara
Tested-by: Andre Przywara
Signed-off-by: Andre Przywara
Signed-off-by: Marc Zyngier
---
 virt/kvm/arm/arch_timer.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index d43308dc3617..7fc272ecae16 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -507,6 +507,14 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
+	/*
+	 * Update the timer output so that it is likely to match the
+	 * state we're about to restore. If the timer expires between
+	 * this point and the register restoration, we'll take the
+	 * interrupt anyway.
+	 */
+	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
+
 	/*
 	 * When using a userspace irqchip with the architected timers and a
 	 * host interrupt controller that doesn't support an active state, we
@@ -730,7 +738,6 @@ static void kvm_timer_init_interrupt(void *info)
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 {
 	struct arch_timer_context *timer;
-	bool level;
 
 	switch (regid) {
 	case KVM_REG_ARM_TIMER_CTL:
@@ -758,10 +765,6 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 		return -1;
 	}
 
-	level = kvm_timer_should_fire(timer);
-	kvm_timer_update_irq(vcpu, level, timer);
-	timer_emulate(timer);
-
 	return 0;
 }
 
-- 
cgit
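As a closing illustration of the userspace flow this last commit describes,
here is a minimal sketch of restoring a timer register via ioctl on a vcpu
that has never run, then entering the guest. The function name is invented
for the example, and vcpu file-descriptor setup and error handling are
omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int restore_timer_then_run(int vcpu_fd, uint64_t cval)
{
	/* KVM_REG_ARM_TIMER_CVAL is the arm64 encoding of the virtual timer CVAL. */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CVAL,
		.addr = (uint64_t)(uintptr_t)&cval,
	};

	/* With this fix, no emulated hrtimer is left armed behind this ioctl. */
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		return -1;

	/* The timer state is picked up on vcpu_load when KVM_RUN is entered. */
	return ioctl(vcpu_fd, KVM_RUN, 0);
}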