path: root/virt/kvm/arm/vgic/vgic-mmio.c
Diffstat (limited to 'virt/kvm/arm/vgic/vgic-mmio.c')
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c  115
1 file changed, 93 insertions(+), 22 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index deb51ee16a3d..83d82bd7dc4e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -16,6 +16,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
+#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>
#include "vgic.h"
@@ -122,10 +123,43 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
return value;
}
+/*
+ * Return the VCPU that performed the MMIO access and trapped from within
+ * the VM, or NULL if this is a userspace access.
+ *
+ * It is safe to disable preemption only around the per-CPU access and to
+ * keep using the resolved vcpu pointer after re-enabling preemption:
+ * even if the current thread is migrated to another CPU, the preempt
+ * notifier handlers update the per-CPU variable on every context switch,
+ * so a later read still yields the same vcpu.
+ */
+static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
+{
+ struct kvm_vcpu *vcpu;
+
+ preempt_disable();
+ vcpu = kvm_arm_get_running_vcpu();
+ preempt_enable();
+ return vcpu;
+}
+
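The comment above hinges on KVM's preempt notifiers. A minimal sketch of the per-CPU bookkeeping this relies on, modeled on virt/kvm/arm/arm.c (illustrative and slightly simplified, not the exact upstream code):

static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/*
 * Called from KVM's preempt notifiers: with the vcpu pointer when a VCPU
 * thread is scheduled in, with NULL when it is scheduled out, so the slot
 * of whichever CPU the thread currently runs on is always up to date.
 */
void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}

struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
	/* Per-CPU reads need a stable CPU; hence the caller's preempt_disable(). */
	BUG_ON(preemptible());
	return __this_cpu_read(kvm_arm_running_vcpu);
}

A userspace ioctl thread never runs between a VCPU thread's sched-in and sched-out, so it reads NULL from its CPU's slot; that is exactly the is_uaccess test used by the handlers below.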
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool is_uaccess)
+{
+ if (is_uaccess)
+ return;
+
+ irq->pending_latch = true;
+ vgic_irq_set_phys_active(irq, true);
+}
+
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ bool is_uaccess = !vgic_get_mmio_requester_vcpu();
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
unsigned long flags;
@@ -134,17 +168,45 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
spin_lock_irqsave(&irq->irq_lock, flags);
- irq->pending_latch = true;
-
+ if (irq->hw)
+ vgic_hw_irq_spending(vcpu, irq, is_uaccess);
+ else
+ irq->pending_latch = true;
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool is_uaccess)
+{
+ if (is_uaccess)
+ return;
+
+ irq->pending_latch = false;
+
+ /*
+ * We don't want the guest to be able to effectively mask the
+ * physical interrupt by writing to ISPENDR and then to ICPENDR for
+ * a HW interrupt, so we clear the active state on the physical side
+ * if the virtual interrupt is not active. This may lead to taking
+ * an additional interrupt on the host, but that should not be a
+ * problem, as the worst that can happen is an additional vgic
+ * injection. We also clear the pending state to maintain proper
+ * semantics for edge HW interrupts.
+ */
+ vgic_irq_set_phys_pending(irq, false);
+ if (!irq->active)
+ vgic_irq_set_phys_active(irq, false);
+}
+
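The masking hazard described in the comment can be made concrete with a small standalone model (plain C with hypothetical types; it mirrors the latch/active updates of vgic_hw_irq_spending() and vgic_hw_irq_cpending(), but is not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the state kept for a HW-mapped interrupt. */
struct hwirq {
	bool phys_pending, phys_active;
	bool virt_active, pending_latch;
};

/* Guest writes ISPENDR: latch the virtual IRQ, activate the physical one. */
static void ispendr_write(struct hwirq *irq)
{
	irq->pending_latch = true;
	irq->phys_active = true;
}

/* Guest writes ICPENDR: clear the pending state, and deactivate the
 * physical line when the virtual IRQ is not active, so the host
 * interrupt is not left masked by a stuck active state. */
static void icpendr_write(struct hwirq *irq)
{
	irq->pending_latch = false;
	irq->phys_pending = false;
	if (!irq->virt_active)
		irq->phys_active = false;
}

int main(void)
{
	struct hwirq timer = { 0 };

	ispendr_write(&timer);
	icpendr_write(&timer);
	/* Prints phys_active=0: the ISPENDR-then-ICPENDR pair did not leave
	 * the physical interrupt active, so future host interrupts still fire. */
	printf("phys_active=%d\n", timer.phys_active);
	return 0;
}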
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ bool is_uaccess = !vgic_get_mmio_requester_vcpu();
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
unsigned long flags;
@@ -154,7 +216,10 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
spin_lock_irqsave(&irq->irq_lock, flags);
- irq->pending_latch = false;
+ if (irq->hw)
+ vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
+ else
+ irq->pending_latch = false;
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -181,27 +246,24 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
return value;
}
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool active, bool is_uaccess)
+{
+ if (is_uaccess)
+ return;
+
+ irq->active = active;
+ vgic_irq_set_phys_active(irq, active);
+}
+
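For context, vgic_mmio_change_active() below is reached from the ISACTIVER/ICACTIVER handlers in this file. A simplified sketch of such a caller (assumed shape, mirroring the pending handlers above; the real handlers additionally bracket the loop with vgic_change_active_prepare()/vgic_change_active_finish() to hold off the other VCPUs, elided here):

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	/* One bit per interrupt: each set bit marks that IRQ active. */
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}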
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
- bool new_active_state)
+ bool active)
{
- struct kvm_vcpu *requester_vcpu;
unsigned long flags;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
- /*
- * The vcpu parameter here can mean multiple things depending on how
- * this function is called; when handling a trap from the kernel it
- * depends on the GIC version, and these functions are also called as
- * part of save/restore from userspace.
- *
- * Therefore, we have to figure out the requester in a reliable way.
- *
- * When accessing VGIC state from user space, the requester_vcpu is
- * NULL, which is fine, because we guarantee that no VCPUs are running
- * when accessing VGIC state from user space so irq->vcpu->cpu is
- * always -1.
- */
- requester_vcpu = kvm_arm_get_running_vcpu();
+ spin_lock_irqsave(&irq->irq_lock, flags);
/*
* If this virtual IRQ was written into a list register, we
@@ -213,14 +275,23 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
* vgic_change_active_prepare) and still has to sync back this IRQ,
* so we release and re-acquire the spin_lock to let the other thread
* sync back the IRQ.
+ *
+ * When accessing VGIC state from user space, requester_vcpu is NULL,
+ * which is fine: we guarantee that no VCPUs are running when VGIC
+ * state is accessed from user space, so irq->vcpu->cpu is always -1.
*/
while (irq->vcpu && /* IRQ may have state in an LR somewhere */
irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
irq->vcpu->cpu != -1) /* VCPU thread is running */
cond_resched_lock(&irq->irq_lock);
- irq->active = new_active_state;
- if (new_active_state)
+ if (irq->hw)
+ vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+ else
+ irq->active = active;
+
+ if (irq->active)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
spin_unlock_irqrestore(&irq->irq_lock, flags);
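The wait loop above is an instance of a common kernel idiom: polling a condition owned by another thread while holding a spinlock, using cond_resched_lock() to drop and re-take the lock on each pass so the owner can get in and make progress. A self-contained sketch of the idiom (all names hypothetical):

#include <linux/sched.h>
#include <linux/spinlock.h>

struct sync_obj {
	spinlock_t lock;
	bool in_flight;		/* set by the owner, cleared once it syncs back */
};

/*
 * Wait until the owning thread clears in_flight. cond_resched_lock()
 * releases the lock (possibly rescheduling) before reacquiring it, so
 * the condition must be re-read on every iteration.
 */
static void wait_for_owner(struct sync_obj *obj)
{
	spin_lock(&obj->lock);
	while (obj->in_flight)
		cond_resched_lock(&obj->lock);
	/* in_flight is now false and we still hold the lock. */
	spin_unlock(&obj->lock);
}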