Diffstat (limited to 'virt/kvm/coalesced_mmio.c')
 virt/kvm/coalesced_mmio.c | 51 ++++++++++++++++++++++++---------------------------
 1 file changed, 24 insertions(+), 27 deletions(-)
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index e2c197fd4f9d..375d6285475e 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,27 +40,6 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
-{
-	struct kvm_coalesced_mmio_ring *ring;
-	unsigned avail;
-
-	/* Are we able to batch it ? */
-
-	/* last is the first free entry
-	 * check if we don't meet the first used entry
-	 * there is always one unused entry in the buffer
-	 */
-	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail == 0) {
-		/* full */
-		return 0;
-	}
-
-	return 1;
-}
-
 static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 				struct kvm_io_device *this, gpa_t addr,
 				int len, const void *val)
@@ -74,9 +53,15 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 
 	spin_lock(&dev->kvm->ring_lock);
 
+	/*
+	 * last is the index of the entry to fill. Verify userspace hasn't
+	 * set last to be out of range, and that there is room in the ring.
+	 * Leave one entry free in the ring so that userspace can differentiate
+	 * between an empty ring and a full ring.
+	 */
 	insert = READ_ONCE(ring->last);
-	if (!coalesced_mmio_has_room(dev, insert) ||
-	    insert >= KVM_COALESCED_MMIO_MAX) {
+	if (insert >= KVM_COALESCED_MMIO_MAX ||
+	    (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
@@ -111,7 +96,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
 	struct page *page;
 
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	if (!page)
 		return -ENOMEM;
 
@@ -174,21 +159,33 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 					   struct kvm_coalesced_mmio_zone *zone)
 {
 	struct kvm_coalesced_mmio_dev *dev, *tmp;
+	int r;
 
 	if (zone->pio != 1 && zone->pio != 0)
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
 
-	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
 		if (zone->pio == dev->zone.pio &&
 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
-			kvm_io_bus_unregister_dev(kvm,
+			r = kvm_io_bus_unregister_dev(kvm,
 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
-			kvm_iodevice_destructor(&dev->dev);
+			/*
+			 * On failure, unregister destroys all devices on the
+			 * bus, including the target device. There's no need
+			 * to restart the walk as there aren't any zones left.
+			 */
+			if (r)
+				break;
 		}
+	}
 
 	mutex_unlock(&kvm->slots_lock);
 
+	/*
+	 * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
+	 * perspective, the coalesced MMIO is most definitely unregistered.
+	 */
 	return 0;
 }
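
Editor's note: the check open coded in coalesced_mmio_write() above relies on a classic producer/consumer ring-buffer convention: one slot is always left unused so that "empty" (first == last) and "full" ((last + 1) % size == first) can be told apart without a shared element count. The extra insert >= KVM_COALESCED_MMIO_MAX test is needed because the ring page is mapped into userspace, so ring->last cannot be trusted. The following is a minimal, self-contained userspace sketch of that convention, not kernel code; RING_SIZE, struct ring, ring_full() and ring_empty() are illustrative stand-ins for KVM_COALESCED_MMIO_MAX and struct kvm_coalesced_mmio_ring, not KVM APIs.

/*
 * Sketch of the "leave one entry free" ring convention (illustrative
 * names only, under the assumptions stated above).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8	/* stand-in for KVM_COALESCED_MMIO_MAX */

struct ring {
	uint32_t first;	/* oldest used entry, advanced by the consumer */
	uint32_t last;	/* next free entry, advanced by the producer */
};

/* Empty: producer and consumer point at the same slot. */
static bool ring_empty(const struct ring *r)
{
	return r->first == r->last;
}

/*
 * Full: advancing last would collide with first. One slot is
 * deliberately wasted so full and empty remain distinguishable;
 * this is the same test coalesced_mmio_write() now open codes.
 */
static bool ring_full(const struct ring *r)
{
	return (r->last + 1) % RING_SIZE == r->first;
}

int main(void)
{
	struct ring r = { .first = 0, .last = 0 };
	int produced = 0, consumed = 0;

	/* Produce until full: only RING_SIZE - 1 entries fit. */
	while (!ring_full(&r)) {
		r.last = (r.last + 1) % RING_SIZE;
		produced++;
	}

	/* Drain the way userspace drains the coalesced MMIO ring:
	 * walk first toward last, one entry at a time. */
	while (!ring_empty(&r)) {
		r.first = (r.first + 1) % RING_SIZE;
		consumed++;
	}

	printf("capacity = %d, drained = %d (RING_SIZE = %d)\n",
	       produced, consumed, RING_SIZE);
	return 0;
}

Running the sketch prints "capacity = 7, drained = 7 (RING_SIZE = 8)": a ring of size N holds at most N - 1 entries under this scheme. The alternative, an explicit element count, would require the kernel producer and the userspace consumer to update shared state atomically; wasting one slot lets the kernel write only last while userspace writes only first.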