author    Linus Torvalds <torvalds@linux-foundation.org>    2022-08-12 09:50:34 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-08-12 09:50:34 -0700
commit    7a53e17accce9d310d2e522dfc701d8da7ccfa65 (patch)
tree      c1ccf061aee42178159cbe9c31c2c4e004b76947 /drivers/virtio
parent    999324f58c41262f5b64d04b7ac54e8f79b019fd (diff)
parent    93e530d2a1c4c0fcce45e01ae6c5c6287a08d3e3 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:

 - A huge patchset supporting vq resize using the new vq reset
   capability

 - Features, fixes, and cleanups all over the place

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (88 commits)
  vdpa/mlx5: Fix possible uninitialized return value
  vdpa_sim_blk: add support for discard and write-zeroes
  vdpa_sim_blk: add support for VIRTIO_BLK_T_FLUSH
  vdpa_sim_blk: make vdpasim_blk_check_range usable by other requests
  vdpa_sim_blk: check if sector is 0 for commands other than read or write
  vdpa_sim: Implement suspend vdpa op
  vhost-vdpa: uAPI to suspend the device
  vhost-vdpa: introduce SUSPEND backend feature bit
  vdpa: Add suspend operation
  virtio-blk: Avoid use-after-free on suspend/resume
  virtio_vdpa: support the arg sizes of find_vqs()
  vhost-vdpa: Call ida_simple_remove() when failed
  vDPA: fix 'cast to restricted le16' warnings in vdpa.c
  vDPA: !FEATURES_OK should not block querying device config space
  vDPA/ifcvf: support userspace to query features and MQ of a management device
  vDPA/ifcvf: get_config_size should return a value no greater than dev implementation
  vhost scsi: Allow user to control num virtqueues
  vhost-scsi: Fix max number of virtqueues
  vdpa/mlx5: Support different address spaces for control and data
  vdpa/mlx5: Implement susupend virtqueue callback
  ...
Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/Kconfig                  |  11
-rw-r--r--  drivers/virtio/virtio.c                 |   4
-rw-r--r--  drivers/virtio/virtio_mmio.c            |  14
-rw-r--r--  drivers/virtio/virtio_pci_common.c      |  32
-rw-r--r--  drivers/virtio/virtio_pci_common.h      |   3
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c      |   8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c      | 153
-rw-r--r--  drivers/virtio/virtio_pci_modern_dev.c  |  39
-rw-r--r--  drivers/virtio/virtio_ring.c            | 770
-rw-r--r--  drivers/virtio/virtio_vdpa.c            |  18
10 files changed, 791 insertions(+), 261 deletions(-)
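
Every transport touched below (mmio, PCI legacy, PCI modern, vdpa) threads the new per-queue size through the same clamp-and-record pattern: the requested size is clamped to the device maximum before the ring is created, and the maximum is then stored in vq->num_max so later resize requests can be bounds-checked. A distilled sketch of that shared logic (not a function that exists in the tree):

static u32 clamp_vq_size(u32 size, u32 num)
{
	/*
	 * 'num' is the maximum queue size reported by the device;
	 * 'size' is the (optional) size requested through find_vqs().
	 * A size of 0, or anything above the device maximum, means
	 * "use the device maximum".
	 */
	if (!size || size > num)
		size = num;
	return size;
}

After vring_create_virtqueue() succeeds, each transport records the device maximum with vq->num_max = num; virtqueue_resize() later rejects requests above that bound with -E2BIG.
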
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 56c77f63cd22..0a53a61231c2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,11 +35,12 @@ if VIRTIO_MENU
config VIRTIO_HARDEN_NOTIFICATION
bool "Harden virtio notification"
+ depends on BROKEN
help
Enable this to harden the device notifications and suppress
those that happen at a time where notifications are illegal.
- Experimental: Note that several drivers still have bugs that
+ Experimental: Note that several drivers still have issues that
may cause crashes or hangs when correct handling of
notifications is enforced; depending on the subset of
drivers and devices you use, this may or may not work.
@@ -126,9 +127,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver was only tested under x86-64 and arm64, but should
- theoretically work on all architectures that support memory hotplug
- and hotremove.
+ This driver currently only supports x86-64 and arm64. Although it
+ should compile on other architectures that implement memory
+ hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to work as
+ expected.
If unsure, say M.
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 14c142d77fba..828ced060742 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -428,7 +428,9 @@ int register_virtio_device(struct virtio_device *dev)
goto out;
dev->index = err;
- dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = dev_set_name(&dev->dev, "virtio%u", dev->index);
+ if (err)
+ goto out_ida_remove;
err = virtio_device_of_init(dev);
if (err)
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 083ff1eb743d..c492a57531c6 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -360,7 +360,7 @@ static void vm_synchronize_cbs(struct virtio_device *vdev)
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
+ const char *name, u32 size, bool ctx)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
struct virtio_mmio_vq_info *info;
@@ -395,14 +395,19 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
+ if (!size || size > num)
+ size = num;
+
/* Create the vring */
- vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ vq = vring_create_virtqueue(index, size, VIRTIO_MMIO_VRING_ALIGN, vdev,
true, true, ctx, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
}
+ vq->num_max = num;
+
/* Activate the queue */
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
@@ -472,6 +477,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
+ u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -487,6 +493,9 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
if (err)
return err;
+ if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
+ enable_irq_wake(irq);
+
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
@@ -494,6 +503,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
+ sizes ? sizes[i] : 0,
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ca51fcc9daab..00ad476a815d 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -174,6 +174,7 @@ error:
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
+ u32 size,
bool ctx,
u16 msix_vec)
{
@@ -186,7 +187,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
if (!info)
return ERR_PTR(-ENOMEM);
- vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
+ vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, size, ctx,
msix_vec);
if (IS_ERR(vq))
goto out_info;
@@ -214,9 +215,15 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+ /*
+ * If the vq is in the reset state (e.g. re-enabling it after a reset
+ * failed), info->node has already been removed from the list, so
+ * don't unlink it again; this prevents unexpected irqs.
+ */
+ if (!vq->reset) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ }
vp_dev->del_vq(info);
kfree(info);
@@ -277,7 +284,7 @@ void vp_del_vqs(struct virtio_device *vdev)
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], bool per_vq_vectors,
+ const char * const names[], u32 sizes[], bool per_vq_vectors,
const bool *ctx,
struct irq_affinity *desc)
{
@@ -320,8 +327,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
else
msix_vec = VP_MSIX_VQ_VECTOR;
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false,
- msix_vec);
+ sizes ? sizes[i] : 0,
+ ctx ? ctx[i] : false, msix_vec);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
@@ -351,7 +358,7 @@ error_find:
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx)
+ const char * const names[], u32 sizes[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i, err, queue_idx = 0;
@@ -373,6 +380,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
+ sizes ? sizes[i] : 0,
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
@@ -390,21 +398,21 @@ out_del_vqs:
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ const char * const names[], u32 sizes[], const bool *ctx,
struct irq_affinity *desc)
{
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, true, ctx, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, false, ctx, desc);
if (!err)
return 0;
/* Finally fall back to regular interrupts. */
- return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, sizes, ctx);
}
const char *vp_bus_name(struct virtio_device *vdev)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 23112d84218f..c0448378b698 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -80,6 +80,7 @@ struct virtio_pci_device {
unsigned int idx,
void (*callback)(struct virtqueue *vq),
const char *name,
+ u32 size,
bool ctx,
u16 msix_vec);
void (*del_vq)(struct virtio_pci_vq_info *info);
@@ -110,7 +111,7 @@ void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ const char * const names[], u32 sizes[], const bool *ctx,
struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5e5721145c7..d75e5c4e637f 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,6 +112,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
+ u32 size,
bool ctx,
u16 msix_vec)
{
@@ -125,16 +126,21 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
return ERR_PTR(-ENOENT);
+ if (!size || size > num)
+ size = num;
+
info->msix_vector = msix_vec;
/* create the vring */
- vq = vring_create_virtqueue(index, num,
+ vq = vring_create_virtqueue(index, size,
VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
true, false, ctx,
vp_notify, callback, name);
if (!vq)
return ERR_PTR(-ENOMEM);
+ vq->num_max = num;
+
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 623906b4996c..f7965c5dd36b 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+ if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+ __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
}
/* virtio config->finalize_features() implementation */
@@ -176,6 +179,110 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
+static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ unsigned long index;
+
+ index = vq->index;
+
+ /* activate the queue */
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
+
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+ return -ENOENT;
+
+ vp_modern_set_queue_reset(mdev, vq->index);
+
+ info = vp_dev->vqs[vq->index];
+
+ /* unlink the vq from the list walked by the irq handler */
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ INIT_LIST_HEAD(&info->node);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_break(vq);
+#endif
+
+ /* If the vq has an exclusive irq, call synchronize_irq() to wait for
+ * any in-flight interrupt handler to complete.
+ *
+ * Note: we can't use disable_irq() since it conflicts with the
+ * affinity-managed IRQs used by some drivers.
+ */
+ if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+ vq->reset = true;
+
+ return 0;
+}
+
+static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags, index;
+ int err;
+
+ if (!vq->reset)
+ return -EBUSY;
+
+ index = vq->index;
+ info = vp_dev->vqs[index];
+
+ if (vp_modern_get_queue_reset(mdev, index))
+ return -EBUSY;
+
+ if (vp_modern_get_queue_enable(mdev, index))
+ return -EBUSY;
+
+ err = vp_active_vq(vq, info->msix_vector);
+ if (err)
+ return err;
+
+ if (vq->callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_unbreak(vq);
+#endif
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+ vq->reset = false;
+
+ return 0;
+}
+
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -186,6 +293,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
+ u32 size,
bool ctx,
u16 msix_vec)
{
@@ -203,47 +311,39 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
- if (num & (num - 1)) {
- dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
+ if (!size || size > num)
+ size = num;
+
+ if (size & (size - 1)) {
+ dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", size);
return ERR_PTR(-EINVAL);
}
info->msix_vector = msix_vec;
/* create the vring */
- vq = vring_create_virtqueue(index, num,
+ vq = vring_create_virtqueue(index, size,
SMP_CACHE_BYTES, &vp_dev->vdev,
true, true, ctx,
vp_notify, callback, name);
if (!vq)
return ERR_PTR(-ENOMEM);
- /* activate the queue */
- vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
- vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
- virtqueue_get_avail_addr(vq),
- virtqueue_get_used_addr(vq));
+ vq->num_max = num;
+
+ err = vp_active_vq(vq, msix_vec);
+ if (err)
+ goto err;
vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
- goto err_map_notify;
- }
-
- if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto err_assign_vector;
- }
+ goto err;
}
return vq;
-err_assign_vector:
- if (!mdev->notify_base)
- pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
-err_map_notify:
+err:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -251,12 +351,15 @@ err_map_notify:
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ const char * const names[],
+ u32 sizes[],
+ const bool *ctx,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
- int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
+ int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, sizes, ctx,
+ desc);
if (rc)
return rc;
@@ -401,6 +504,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -419,6 +524,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
/* the PCI probing function */
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index fa2a9445bb18..869cb46bef96 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -3,6 +3,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
/*
* vp_modern_map_capability - map a part of virtio pci capability
@@ -475,6 +476,44 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
EXPORT_SYMBOL_GPL(vp_modern_set_status);
/*
+ * vp_modern_get_queue_reset - get the queue reset status
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ return vp_ioread16(&cfg->queue_reset);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
+
+/*
+ * vp_modern_set_queue_reset - reset the queue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ vp_iowrite16(1, &cfg->queue_reset);
+
+ while (vp_ioread16(&cfg->queue_reset))
+ msleep(1);
+
+ while (vp_ioread16(&cfg->cfg.queue_enable))
+ msleep(1);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
+
+/*
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 643ca779fcc6..d66c8e6d0ef3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -85,6 +85,71 @@ struct vring_desc_extra {
u16 next; /* The next desc state in a list. */
};
+struct vring_virtqueue_split {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+
+ /*
+ * The parameters for creating vrings are reserved for creating new
+ * vring.
+ */
+ u32 vring_align;
+ bool may_reduce_num;
+};
+
+struct vring_virtqueue_packed {
+ /* Actual memory layout for this queue. */
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -124,64 +189,10 @@ struct vring_virtqueue {
union {
/* Available for split ring */
- struct {
- /* Actual memory layout for this queue. */
- struct vring vring;
-
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
-
- /*
- * Last written value to avail->idx in
- * guest byte order.
- */
- u16 avail_idx_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_split *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t queue_dma_addr;
- size_t queue_size_in_bytes;
- } split;
+ struct vring_virtqueue_split split;
/* Available for packed ring */
- struct {
- /* Actual memory layout for this queue. */
- struct {
- unsigned int num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Avail used flags. */
- u16 avail_used_flags;
-
- /* Index of the next avail descriptor. */
- u16 next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- u16 event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
+ struct vring_virtqueue_packed packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
@@ -200,6 +211,16 @@ struct vring_virtqueue {
#endif
};
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
+static void vring_free(struct virtqueue *_vq);
/*
* Helpers.
@@ -364,6 +385,24 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
return dma_mapping_error(vring_dma_dev(vq), addr);
}
+static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
+{
+ vq->vq.num_free = num;
+
+ if (vq->packed_ring)
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ else
+ vq->last_used_idx = 0;
+
+ vq->event_triggered = false;
+ vq->num_added = 0;
+
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+}
+
/*
* Split ring specific functions - *_split().
@@ -907,28 +946,107 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
return NULL;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
+ struct vring_virtqueue *vq)
+{
+ struct virtio_device *vdev;
+
+ vdev = vq->vq.vdev;
+
+ vring_split->avail_flags_shadow = 0;
+ vring_split->avail_idx_shadow = 0;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!vq->vq.callback) {
+ vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
+ vring_split->avail_flags_shadow);
+ }
+}
+
+static void virtqueue_reinit_split(struct vring_virtqueue *vq)
+{
+ int num;
+
+ num = vq->split.vring.num;
+
+ vq->split.vring.avail->flags = 0;
+ vq->split.vring.avail->idx = 0;
+
+ /* reset avail event */
+ vq->split.vring.avail->ring[num] = 0;
+
+ vq->split.vring.used->flags = 0;
+ vq->split.vring.used->idx = 0;
+
+ /* reset used event */
+ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
+
+ virtqueue_init(vq, num);
+
+ virtqueue_vring_init_split(&vq->split, vq);
+}
+
+static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
+ struct vring_virtqueue_split *vring_split)
+{
+ vq->split = *vring_split;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
+{
+ struct vring_desc_state_split *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_split->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!state)
+ goto err_state;
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_extra;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_split));
+
+ vring_split->desc_state = state;
+ vring_split->desc_extra = extra;
+ return 0;
+
+err_extra:
+ kfree(state);
+err_state:
+ return -ENOMEM;
+}
+
+static void vring_free_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev)
+{
+ vring_free_queue(vdev, vring_split->queue_size_in_bytes,
+ vring_split->vring.desc,
+ vring_split->queue_dma_addr);
+
+ kfree(vring_split->desc_state);
+ kfree(vring_split->desc_extra);
+}
+
+static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ u32 num,
+ unsigned int vring_align,
+ bool may_reduce_num)
{
- struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
+ return -EINVAL;
}
/* TODO: allocate each queue chunk individually */
@@ -939,11 +1057,11 @@ static struct virtqueue *vring_create_virtqueue_split(
if (queue)
break;
if (!may_reduce_num)
- return NULL;
+ return -ENOMEM;
}
if (!num)
- return NULL;
+ return -ENOMEM;
if (!queue) {
/* Try to get a single page. You are my only hope! */
@@ -951,26 +1069,85 @@ static struct virtqueue *vring_create_virtqueue_split(
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
- return NULL;
+ return -ENOMEM;
+
+ vring_init(&vring_split->vring, num, queue, vring_align);
+
+ vring_split->queue_dma_addr = dma_addr;
+ vring_split->queue_size_in_bytes = vring_size(num, vring_align);
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ vring_split->vring_align = vring_align;
+ vring_split->may_reduce_num = may_reduce_num;
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ return 0;
+}
+
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct virtqueue *vq;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
+ may_reduce_num);
+ if (err)
+ return NULL;
+
+ vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
+ vring_free_split(&vring_split, vdev);
return NULL;
}
- to_vvq(vq)->split.queue_dma_addr = dma_addr;
- to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
+static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num,
+ vq->split.vring_align,
+ vq->split.may_reduce_num);
+ if (err)
+ goto err;
+
+ err = vring_alloc_state_extra_split(&vring_split);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_split(&vring_split, vq);
+
+ virtqueue_init(vq, vring_split.vring.num);
+ virtqueue_vring_attach_split(vq, &vring_split);
+
+ return 0;
+
+err_state_extra:
+ vring_free_split(&vring_split, vdev);
+err:
+ virtqueue_reinit_split(vq);
+ return -ENOMEM;
+}
+
/*
* Packed ring specific functions - *_packed().
@@ -1637,8 +1814,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
- unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
struct vring_desc_extra *desc_extra;
unsigned int i;
@@ -1656,19 +1832,32 @@ static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *v
return desc_extra;
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev)
+{
+ if (vring_packed->vring.desc)
+ vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
+ vring_packed->vring.desc,
+ vring_packed->ring_dma_addr);
+
+ if (vring_packed->vring.driver)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.driver,
+ vring_packed->driver_event_dma_addr);
+
+ if (vring_packed->vring.device)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.device,
+ vring_packed->device_event_dma_addr);
+
+ kfree(vring_packed->desc_state);
+ kfree(vring_packed->desc_extra);
+}
+
+static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ u32 num)
{
- struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
@@ -1680,7 +1869,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
- goto err_ring;
+ goto err;
+
+ vring_packed->vring.desc = ring;
+ vring_packed->ring_dma_addr = ring_dma_addr;
+ vring_packed->ring_size_in_bytes = ring_size_in_bytes;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
@@ -1688,13 +1881,112 @@ static struct virtqueue *vring_create_virtqueue_packed(
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
- goto err_driver;
+ goto err;
+
+ vring_packed->vring.driver = driver;
+ vring_packed->event_size_in_bytes = event_size_in_bytes;
+ vring_packed->driver_event_dma_addr = driver_event_dma_addr;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
- goto err_device;
+ goto err;
+
+ vring_packed->vring.device = device;
+ vring_packed->device_event_dma_addr = device_event_dma_addr;
+
+ vring_packed->vring.num = num;
+
+ return 0;
+
+err:
+ vring_free_packed(vring_packed, vdev);
+ return -ENOMEM;
+}
+
+static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
+{
+ struct vring_desc_state_packed *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_packed->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+ if (!state)
+ goto err_desc_state;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_packed));
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_desc_extra;
+
+ vring_packed->desc_state = state;
+ vring_packed->desc_extra = extra;
+
+ return 0;
+
+err_desc_extra:
+ kfree(state);
+err_desc_state:
+ return -ENOMEM;
+}
+
+static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
+ bool callback)
+{
+ vring_packed->next_avail_idx = 0;
+ vring_packed->avail_wrap_counter = 1;
+ vring_packed->event_flags_shadow = 0;
+ vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vring_packed->vring.driver->flags =
+ cpu_to_le16(vring_packed->event_flags_shadow);
+ }
+}
+
+static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
+ struct vring_virtqueue_packed *vring_packed)
+{
+ vq->packed = *vring_packed;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
+{
+ memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
+ memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
+
+ /* We need to reset desc.flags; for the reason, see is_used_desc_packed(). */
+ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
+
+ virtqueue_init(vq, vq->packed.vring.num);
+ virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
+}
+
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
@@ -1703,8 +1995,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -1713,15 +2005,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- vq->event_triggered = false;
- vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1730,65 +2015,58 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
-
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
-
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num,
- sizeof(struct vring_desc_state_packed),
- GFP_KERNEL);
- if (!vq->packed.desc_state)
- goto err_desc_state;
-
- memset(vq->packed.desc_state, 0,
- num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (!vq->packed.desc_extra)
- goto err_desc_extra;
+ virtqueue_vring_init_packed(&vring_packed, !!callback);
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags =
- cpu_to_le16(vq->packed.event_flags_shadow);
- }
+ virtqueue_init(vq, num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_desc_extra:
- kfree(vq->packed.desc_state);
-err_desc_state:
+err_state_extra:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+ vring_free_packed(&vring_packed, vdev);
err_ring:
return NULL;
}
+static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
+
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
+
+ virtqueue_init(vq, vring_packed.vring.num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
+
+ return 0;
+
+err_state_extra:
+ vring_free_packed(&vring_packed, vdev);
+err_ring:
+ virtqueue_reinit_packed(vq);
+ return -ENOMEM;
+}
+
/*
* Generic functions and exported symbols.
@@ -2131,8 +2409,8 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or for a queue that has been reset.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
@@ -2180,16 +2458,17 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
+ int err;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
@@ -2202,8 +2481,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -2212,14 +2491,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2228,47 +2500,22 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->split.queue_dma_addr = 0;
- vq->split.queue_size_in_bytes = 0;
-
- vq->split.vring = vring;
- vq->split.avail_flags_shadow = 0;
- vq->split.avail_idx_shadow = 0;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
- vq->split.avail_flags_shadow);
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
}
- vq->split.desc_state = kmalloc_array(vring.num,
- sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state)
- goto err_state;
-
- vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
- if (!vq->split.desc_extra)
- goto err_extra;
+ virtqueue_vring_init_split(vring_split, vq);
- /* Put everything in free lists. */
- vq->free_head = 0;
- memset(vq->split.desc_state, 0, vring.num *
- sizeof(struct vring_desc_state_split));
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-
-err_extra:
- kfree(vq->split.desc_state);
-err_state:
- kfree(vq);
- return NULL;
}
-EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
@@ -2294,6 +2541,75 @@ struct virtqueue *vring_create_virtqueue(
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @num: new ring size
+ * @recycle: callback to recycle buffers that are no longer used
+ *
+ * If a new vring really needs to be created, this first puts the current
+ * vq into the reset state, then calls the passed callback to recycle every
+ * buffer that is no longer in use. The old vring is released only after
+ * the new vring has been created successfully.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring; the vq falls back to the
+ *	original ring size and can still work normally.
+ * -EBUSY: Failed to sync with the device; the vq may not work properly.
+ * -ENOENT: Transport or device does not support resize.
+ * -E2BIG/-EINVAL: The requested ring size is invalid.
+ * -EPERM: Operation not permitted.
+ */
+int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (num > vq->vq.num_max)
+ return -E2BIG;
+
+ if (!num)
+ return -EINVAL;
+
+ if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ return 0;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ if (vq->packed_ring)
+ err = virtqueue_resize_packed(_vq, num);
+ else
+ err = virtqueue_resize_split(_vq, num);
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
+
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2306,25 +2622,21 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
- struct vring vring;
+ struct vring_virtqueue_split vring_split = {};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vring_init(&vring_split.vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
-void vring_del_virtqueue(struct virtqueue *_vq)
+static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- spin_lock(&vq->vq.vdev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&vq->vq.vdev->vqs_list_lock);
-
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2355,6 +2667,18 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
+}
+
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
+ vring_free(_vq);
+
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2402,6 +2726,30 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_break(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_break);
+
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_unbreak(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
+
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
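
virtqueue_resize() above is the driver-facing entry point for the ring-resize support added by this series. A minimal usage sketch; the recycle callback and the my_free_buf() helper are hypothetical, not part of this patch:

static void my_recycle(struct virtqueue *vq, void *buf)
{
	/*
	 * 'buf' is the data token that was handed to virtqueue_add_*();
	 * my_free_buf() is a hypothetical driver helper that frees or
	 * reposts it.
	 */
	my_free_buf(buf);
}

static int my_resize_vq(struct virtqueue *vq, u32 new_num)
{
	int err = virtqueue_resize(vq, new_num, my_recycle);

	/*
	 * Per the kernel-doc above, -ENOMEM means the new ring could not
	 * be allocated but the vq was restored to its old size and keeps
	 * working; treat that as a soft failure.
	 */
	if (err == -ENOMEM)
		return 0;

	return err;
}
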
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c40f7deb6b5a..9bc4d110b800 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -131,7 +131,7 @@ static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
+ const char *name, u32 size, bool ctx)
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
@@ -168,14 +168,17 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ if (!size || size > max_num)
+ size = max_num;
+
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
- may_reduce_num = (max_num == min_num) ? false : true;
+ may_reduce_num = (size == min_num) ? false : true;
/* Create the vring */
align = ops->get_vq_align(vdpa);
- vq = vring_create_virtqueue(index, max_num, align, vdev,
+ vq = vring_create_virtqueue(index, size, align, vdev,
true, may_reduce_num, ctx,
virtio_vdpa_notify, callback, name);
if (!vq) {
@@ -183,6 +186,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ vq->num_max = max_num;
+
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
@@ -267,6 +272,7 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
+ u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -282,9 +288,9 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
- vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
- callbacks[i], names[i], ctx ?
- ctx[i] : false);
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, callbacks[i],
+ names[i], sizes ? sizes[i] : 0,
+ ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
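
To tie the transport changes back to the extended find_vqs() signature, here is a hedged sketch of a config-ops-level caller requesting per-queue sizes (the rx_cb/tx_cb callbacks are hypothetical; passing NULL for the sizes array keeps the old behaviour of using the device maximum everywhere):

static int probe_two_vqs(struct virtio_device *vdev,
			 vq_callback_t *rx_cb, vq_callback_t *tx_cb)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[2] = { rx_cb, tx_cb };
	static const char * const names[] = { "rx", "tx" };
	u32 sizes[2] = { 256, 0 };	/* 0: use the device maximum */

	/* ctx and desc are optional and may be NULL. */
	return vdev->config->find_vqs(vdev, 2, vqs, callbacks, names,
				      sizes, NULL, NULL);
}

Note that the modern PCI transport still rejects non-power-of-2 sizes, so a requested size such as 256 is safe while, say, 300 would fail with -EINVAL.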