Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/pmu.c             |  8
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c  | 41
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.h  | 14
-rw-r--r--  virt/kvm/arm/vgic/vgic.c       | 12
-rw-r--r--  virt/kvm/async_pf.c            | 13
5 files changed, 63 insertions, 25 deletions
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 6e9c40eea208..69ccce308458 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
continue;
type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
& ARMV8_PMU_EVTYPE_EVENT;
- if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+ if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
&& (enable & BIT(i))) {
reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg);
@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
/* Software increment event doesn't need to be backed by a perf event */
- if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
+ select_idx != ARMV8_PMU_CYCLE_IDX)
return;
memset(&attr, 0, sizeof(struct perf_event_attr));
@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
attr.exclude_hv = 1; /* Don't count EL2 events */
attr.exclude_host = 1; /* Don't count host events */
- attr.config = eventsel;
+ attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
+ ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
counter = kvm_pmu_get_counter_value(vcpu, select_idx);
/* The initial sample period (overflow count) of an event. */
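
The pmu.c hunks above replace the old SW_INCR alias with the architectural event number and special-case the cycle counter: only regular counters skip perf-event creation for a software-increment event type, and the cycle counter is always programmed to count CPU cycles no matter which event was written to its type register. A minimal user-space sketch of that selection logic, with the constants restated purely for illustration (0x00 and 0x11 are the architectural SW_INCR and CPU_CYCLES event numbers, 31 the cycle-counter index):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PERFCTR_SW_INCR		0x00	/* architectural event numbers, restated here */
#define PERFCTR_CPU_CYCLES	0x11
#define PMU_CYCLE_IDX		31	/* index of the dedicated cycle counter */

/* Returns false when no perf event should be created at all. */
static bool pick_perf_config(uint64_t select_idx, uint64_t eventsel, uint64_t *config)
{
	/* SW_INCR on a regular counter is handled purely in software. */
	if (eventsel == PERFCTR_SW_INCR && select_idx != PMU_CYCLE_IDX)
		return false;

	/* The cycle counter always counts CPU cycles, whatever was written. */
	*config = (select_idx == PMU_CYCLE_IDX) ? PERFCTR_CPU_CYCLES : eventsel;
	return true;
}

int main(void)
{
	uint64_t config;

	if (pick_perf_config(PMU_CYCLE_IDX, PERFCTR_SW_INCR, &config))
		printf("cycle counter: config 0x%llx\n", (unsigned long long)config);
	if (!pick_perf_config(3, PERFCTR_SW_INCR, &config))
		printf("counter 3, SW_INCR: no perf event\n");
	return 0;
}
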
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
return container_of(dev, struct vgic_io_device, dev);
}
-static bool check_region(const struct vgic_register_region *region,
+static bool check_region(const struct kvm *kvm,
+ const struct vgic_register_region *region,
gpa_t addr, int len)
{
- if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
- return true;
- if ((region->access_flags & VGIC_ACCESS_32bit) &&
- len == sizeof(u32) && !(addr & 3))
- return true;
- if ((region->access_flags & VGIC_ACCESS_64bit) &&
- len == sizeof(u64) && !(addr & 7))
- return true;
+ int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+
+ switch (len) {
+ case sizeof(u8):
+ flags = VGIC_ACCESS_8bit;
+ break;
+ case sizeof(u32):
+ flags = VGIC_ACCESS_32bit;
+ break;
+ case sizeof(u64):
+ flags = VGIC_ACCESS_64bit;
+ break;
+ default:
+ return false;
+ }
+
+ if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+ if (!region->bits_per_irq)
+ return true;
+
+ /* Do we access a non-allocated IRQ? */
+ return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+ }
return false;
}
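
check_region() now derives the required access-size flag from the length, checks natural alignment with IS_ALIGNED(), and for regions that encode per-IRQ state also rejects accesses whose decoded INTID lies at or beyond nr_spis + VGIC_NR_PRIVATE_IRQS. A stand-alone sketch of the size-and-alignment part (the flag encodings are assumed for illustration, not taken from the kernel headers):

#include <stdbool.h>
#include <stdint.h>

#define ACCESS_8bit	(1 << 0)	/* assumed flag encodings */
#define ACCESS_32bit	(1 << 1)
#define ACCESS_64bit	(1 << 2)

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static bool access_size_ok(unsigned int region_flags, uint64_t addr, int len)
{
	int flags;

	switch (len) {
	case 1:
		flags = ACCESS_8bit;
		break;
	case 4:
		flags = ACCESS_32bit;
		break;
	case 8:
		flags = ACCESS_64bit;
		break;
	default:
		return false;	/* e.g. a 2-byte access is never valid */
	}

	/* The region must allow this width and the address must be naturally aligned. */
	return (region_flags & flags) && IS_ALIGNED(addr, len);
}

The per-IRQ bound check then reuses VGIC_ADDR_TO_INTID(), which is illustrated after the vgic-mmio.h hunk below.
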
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
- if (!region || !check_region(region, addr, len)) {
+ if (!region || !check_region(vcpu->kvm, region, addr, len)) {
memset(val, 0, len);
return 0;
}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
- if (!region)
- return 0;
-
- if (!check_region(region, addr, len))
+ if (!region || !check_region(vcpu->kvm, region, addr, len))
return 0;
switch (iodev->iodev_type) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
/*
- * (addr & mask) gives us the byte offset for the INT ID, so we want to
- * divide this with 'bytes per irq' to get the INT ID, which is given
- * by '(bits) / 8'. But we do this with fixed-point-arithmetic and
- * take advantage of the fact that division by a fraction equals
- * multiplication with the inverted fraction, and scale up both the
- * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ * (addr & mask) gives us the _byte_ offset for the INT ID.
+ * We multiply this by 8 to get the _bit_ offset, then divide this by
+ * the number of bits to learn the actual INT ID.
+ * But instead of a division (which requires a "long long div" implementation),
+ * we shift by the binary logarithm of <bits>.
+ * This assumes that <bits> is a power of two.
*/
#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
- 64 / (bits) / 8)
+ 8 >> ilog2(bits))
/*
* Some VGIC registers store per-IRQ information, with a different number
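
A worked example of the new macro, with ilog2() open-coded so the snippet builds in user space (the register names in the comments merely illustrate a 1-bit-per-IRQ and an 8-bit-per-IRQ region):

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u32(uint32_t v)	/* assumes v is a power of two */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

#define VGIC_ADDR_IRQ_MASK(bits)	(((bits) * 1024 / 8) - 1)
#define VGIC_ADDR_TO_INTID(addr, bits)	(((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
					 8 >> ilog2_u32(bits))

int main(void)
{
	/* GICD_ISENABLER-style region: 1 bit per IRQ, byte offset 0x4 -> INTID 32 */
	printf("%lu\n", (unsigned long)VGIC_ADDR_TO_INTID(0x4, 1));
	/* GICD_IPRIORITYR-style region: 8 bits per IRQ, byte offset 0x20 -> INTID 32 */
	printf("%lu\n", (unsigned long)VGIC_ADDR_TO_INTID(0x20, 8));
	return 0;
}

For any power-of-two <bits> this yields the same result as the old 64 / (bits) / 8 fixed-point form, but without requiring a 64-bit division helper.
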
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
* no more work for us to do.
*/
spin_unlock(&irq->irq_lock);
+
+ /*
+ * We have to kick the VCPU here, because we could be
+ * queueing an edge-triggered interrupt for which we
+ * get no EOI maintenance interrupt. In that case,
+ * while the IRQ is already on the VCPU's AP list, the
+ * VCPU could have EOI'ed the original interrupt and
+ * won't see this one until it exits for some other
+ * reason.
+ */
+ if (vcpu)
+ kvm_vcpu_kick(vcpu);
return false;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8035cc1eb955..efeceb0a222d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work)
spin_lock(&vcpu->async_pf.lock);
list_add_tail(&apf->link, &vcpu->async_pf.done);
+ apf->vcpu = NULL;
spin_unlock(&vcpu->async_pf.lock);
/*
@@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work)
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
+ spin_lock(&vcpu->async_pf.lock);
+
/* cancel outstanding work queue item */
while (!list_empty(&vcpu->async_pf.queue)) {
struct kvm_async_pf *work =
@@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
typeof(*work), queue);
list_del(&work->queue);
+ /*
+ * We know it's present in vcpu->async_pf.done, do
+ * nothing here.
+ */
+ if (!work->vcpu)
+ continue;
+
+ spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
flush_work(&work->work);
#else
@@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
kmem_cache_free(async_pf_cache, work);
}
#endif
+ spin_lock(&vcpu->async_pf.lock);
}
- spin_lock(&vcpu->async_pf.lock);
while (!list_empty(&vcpu->async_pf.done)) {
struct kvm_async_pf *work =
list_first_entry(&vcpu->async_pf.done,