author    Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 17:05:34 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 17:05:34 -0700
commit    8ccd54fe45713cd458015b5b08d6098545e70543 (patch)
tree      12a034b2ee42d681b2e915815c4529d6f246bcc4 /drivers/virtio
parent    0835b5ee8704aef4e19b369237a762c52c7b6fb1 (diff)
parent    c82729e06644f4e087f5ff0f91b8fb15e03b8890 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 "virtio,vhost,vdpa: features, fixes, and cleanups:

   - reduction in interrupt rate in virtio

   - perf improvement for VDUSE

   - scalability for vhost-scsi

   - non power of 2 ring support for packed rings

   - better management for mlx5 vdpa

   - suspend for snet

   - VIRTIO_F_NOTIFICATION_DATA

   - shared backend with vdpa-sim-blk

   - user VA support in vdpa-sim

   - better struct packing for virtio

  and fixes, cleanups all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (52 commits)
  vhost_vdpa: fix unmap process in no-batch mode
  MAINTAINERS: make me a reviewer of VIRTIO CORE AND NET DRIVERS
  tools/virtio: fix build caused by virtio_ring changes
  virtio_ring: add a struct device forward declaration
  vdpa_sim_blk: support shared backend
  vdpa_sim: move buffer allocation in the devices
  vdpa/snet: use likely/unlikely macros in hot functions
  vdpa/snet: implement kick_vq_with_data callback
  virtio-vdpa: add VIRTIO_F_NOTIFICATION_DATA feature support
  virtio: add VIRTIO_F_NOTIFICATION_DATA feature support
  vdpa/snet: support the suspend vDPA callback
  vdpa/snet: support getting and setting VQ state
  MAINTAINERS: add vringh.h to Virtio Core and Net Drivers
  vringh: address kdoc warnings
  vdpa: address kdoc warnings
  virtio_ring: don't update event idx on get_buf
  vdpa_sim: add support for user VA
  vdpa_sim: replace the spinlock with a mutex to protect the state
  vdpa_sim: use kthread worker
  vdpa_sim: make devices agnostic for work management
  ...
Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/virtio_mmio.c        |  18
-rw-r--r--  drivers/virtio/virtio_pci_modern.c  |  22
-rw-r--r--  drivers/virtio/virtio_ring.c        |  89
-rw-r--r--  drivers/virtio/virtio_vdpa.c        | 120
4 files changed, 210 insertions, 39 deletions
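
For drivers/virtio, the headline feature is VIRTIO_F_NOTIFICATION_DATA: when negotiated, the driver embeds the queue index and the next available index (plus the wrap counter for packed rings) in each notification, saving the device a read of the ring to learn how far the driver has advanced. A minimal user-space sketch of the encoding implemented by vring_notification_data() in the virtio_ring.c hunk below; illustrative only, with the bit layout taken from the diff:

#include <stdint.h>
#include <stdio.h>

/* Bit position of the packed-ring wrap counter, as in the kernel headers. */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15

static uint32_t notification_data(uint16_t vq_index, uint16_t next_avail,
				  int packed, int wrap_counter)
{
	uint16_t next = next_avail;

	if (packed)
		/* Packed ring: 15-bit offset, wrap counter in bit 15. */
		next = (next_avail & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
		       (uint16_t)(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);

	/* High 16 bits: next available index; low 16 bits: queue index. */
	return (uint32_t)next << 16 | vq_index;
}

int main(void)
{
	/* Split ring, queue 2, next available index 7 -> 0x00070002. */
	printf("0x%08x\n", (unsigned int)notification_data(2, 7, 0, 0));
	return 0;
}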
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 4b5e883055ee..a46a4a29e929 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -286,6 +286,16 @@ static bool vm_notify(struct virtqueue *vq)
return true;
}
+static bool vm_notify_with_data(struct virtqueue *vq)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+ u32 data = vring_notification_data(vq);
+
+ writel(data, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+
+ return true;
+}
+
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
@@ -364,12 +374,18 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
const char *name, bool ctx)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ bool (*notify)(struct virtqueue *vq);
struct virtio_mmio_vq_info *info;
struct virtqueue *vq;
unsigned long flags;
unsigned int num;
int err;
+ if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
+ notify = vm_notify_with_data;
+ else
+ notify = vm_notify;
+
if (!name)
return NULL;
@@ -398,7 +414,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
/* Create the vring */
vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
- true, true, ctx, vm_notify, callback, name);
+ true, true, ctx, notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
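
For the MMIO transport the feature only changes what is written to the VIRTIO_MMIO_QUEUE_NOTIFY register: the full 32-bit notification data rather than the bare queue index. Note that the callback is resolved once at setup time, so the hot notification path carries no feature-bit branch. A stand-in sketch of that selection pattern, with simplified types rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

struct vq { unsigned int index; };
typedef bool (*notify_fn)(struct vq *vq);

static bool notify_plain(struct vq *vq)
{
	printf("write index %u\n", vq->index); /* stands in for writel(vq->index, ...) */
	return true;
}

static bool notify_with_data(struct vq *vq)
{
	printf("write data for vq %u\n", vq->index); /* stands in for writel(data, ...) */
	return true;
}

int main(void)
{
	bool has_notification_data = true; /* did feature negotiation grant it? */
	notify_fn notify = has_notification_data ? notify_with_data : notify_plain;
	struct vq q = { .index = 3 };

	notify(&q);
	return 0;
}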
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 9e496e288cfa..d6bb68ba84e5 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -288,6 +288,15 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
return vp_modern_config_vector(&vp_dev->mdev, vector);
}
+static bool vp_notify_with_data(struct virtqueue *vq)
+{
+ u32 data = vring_notification_data(vq);
+
+ iowrite32(data, (void __iomem *)vq->priv);
+
+ return true;
+}
+
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
unsigned int index,
@@ -298,10 +307,16 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ bool (*notify)(struct virtqueue *vq);
struct virtqueue *vq;
u16 num;
int err;
+ if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
+ notify = vp_notify_with_data;
+ else
+ notify = vp_notify;
+
if (index >= vp_modern_get_num_queues(mdev))
return ERR_PTR(-EINVAL);
@@ -310,18 +325,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
- if (!is_power_of_2(num)) {
- dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
- return ERR_PTR(-EINVAL);
- }
-
info->msix_vector = msix_vec;
/* create the vring */
vq = vring_create_virtqueue(index, num,
SMP_CACHE_BYTES, &vp_dev->vdev,
true, true, ctx,
- vp_notify, callback, name);
+ notify, callback, name);
if (!vq)
return ERR_PTR(-ENOMEM);
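
Besides the same setup-time notify selection, the modern PCI transport drops its power-of-2 queue-size check: with packed rings the constraint no longer holds universally, so rejecting such sizes at the transport level was too strict. A hedged sketch of a per-ring-type validity check, assuming the virtio 1.1 limits (split rings must be a power of 2, packed rings may be any size up to 32768; the real validation lives in the ring layer):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ring_size_valid(uint32_t num, bool packed_ring)
{
	if (num == 0 || num > 32768)
		return false;
	if (packed_ring)
		return true; /* packed: any size within the limit */
	return (num & (num - 1)) == 0; /* split: power of 2 required */
}

int main(void)
{
	printf("split 768:  %d\n", ring_size_valid(768, false)); /* 0 */
	printf("packed 768: %d\n", ring_size_valid(768, true));  /* 1 */
	return 0;
}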
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 41144b5246a8..c5310eaf8b46 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -231,10 +231,10 @@ static void vring_free(struct virtqueue *_vq);
* Helpers.
*/
-#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
-static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
- unsigned int total_sg)
+static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
+ unsigned int total_sg)
{
/*
* If the host supports indirect descriptor tables, and we have multiple
@@ -269,7 +269,7 @@ static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
* unconditionally on data path.
*/
-static bool vring_use_dma_api(struct virtio_device *vdev)
+static bool vring_use_dma_api(const struct virtio_device *vdev)
{
if (!virtio_has_dma_quirk(vdev))
return true;
@@ -289,7 +289,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
}
-size_t virtio_max_dma_size(struct virtio_device *vdev)
+size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
@@ -349,7 +349,7 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
* making all of the arch DMA ops work on the vring device itself
* is a mess.
*/
-static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->dma_dev;
}
@@ -423,7 +423,7 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
*/
static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
- struct vring_desc *desc)
+ const struct vring_desc *desc)
{
u16 flags;
@@ -784,7 +784,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
}
}
-static inline bool more_used_split(const struct vring_virtqueue *vq)
+static bool more_used_split(const struct vring_virtqueue *vq)
{
return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
vq->split.vring.used->idx);
@@ -854,6 +854,14 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
if (vq->event)
/* TODO: this is a hack. Figure out a cleaner value to write. */
vring_used_event(&vq->split.vring) = 0x0;
@@ -1172,18 +1180,18 @@ err:
/*
* Packed ring specific functions - *_packed().
*/
-static inline bool packed_used_wrap_counter(u16 last_used_idx)
+static bool packed_used_wrap_counter(u16 last_used_idx)
{
return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
-static inline u16 packed_last_used(u16 last_used_idx)
+static u16 packed_last_used(u16 last_used_idx)
{
return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
- struct vring_desc_extra *extra)
+ const struct vring_desc_extra *extra)
{
u16 flags;
@@ -1206,7 +1214,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
}
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
- struct vring_packed_desc *desc)
+ const struct vring_packed_desc *desc)
{
u16 flags;
@@ -1612,7 +1620,7 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
return avail == used && used == used_wrap_counter;
}
-static inline bool more_used_packed(const struct vring_virtqueue *vq)
+static bool more_used_packed(const struct vring_virtqueue *vq)
{
u16 last_used;
u16 last_used_idx;
@@ -1699,6 +1707,14 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
@@ -2330,12 +2346,6 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- /* If device triggered an event already it won't trigger one again:
- * no need to disable.
- */
- if (vq->event_triggered)
- return;
-
if (vq->packed_ring)
virtqueue_disable_cb_packed(_vq);
else
@@ -2752,6 +2762,23 @@ void vring_del_virtqueue(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
+u32 vring_notification_data(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 next;
+
+ if (vq->packed_ring)
+ next = (vq->packed.next_avail_idx &
+ ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
+ vq->packed.avail_wrap_counter <<
+ VRING_PACKED_EVENT_F_WRAP_CTR;
+ else
+ next = vq->split.avail_idx_shadow;
+
+ return next << 16 | _vq->index;
+}
+EXPORT_SYMBOL_GPL(vring_notification_data);
+
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
@@ -2771,6 +2798,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_ORDER_PLATFORM:
break;
+ case VIRTIO_F_NOTIFICATION_DATA:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
@@ -2786,10 +2815,10 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
*/
-unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
+unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
@@ -2819,9 +2848,9 @@ void __virtqueue_unbreak(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
-bool virtqueue_is_broken(struct virtqueue *_vq)
+bool virtqueue_is_broken(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
return READ_ONCE(vq->broken);
}
@@ -2868,9 +2897,9 @@ void __virtio_unbreak_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
-dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2881,9 +2910,9 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
-dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2895,9 +2924,9 @@ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
-dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2910,7 +2939,7 @@ dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
/* Only available for split ring */
-const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{
return &to_vvq(vq)->split.vring;
}
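
Much of the virtio_ring.c churn is constification: to_vvq() switches to container_of_const() so that accessors taking a const struct virtqueue * keep constness instead of silently casting it away, and redundant inline keywords are dropped from static helpers. A simplified user-space rendition of the const-preserving container_of idea using C11 _Generic; the kernel's container_of_const() is more general than this two-type version:

#include <stddef.h>
#include <stdio.h>

struct virtqueue       { unsigned int index; };
struct vring_virtqueue { struct virtqueue vq; int broken; };

/* const pointer in -> const pointer out; non-const in -> non-const out. */
#define to_vvq(_vq)                                                        \
	_Generic((_vq),                                                    \
		const struct virtqueue *: (const struct vring_virtqueue *) \
			((const char *)(_vq) -                             \
			 offsetof(struct vring_virtqueue, vq)),            \
		struct virtqueue *: (struct vring_virtqueue *)             \
			((char *)(_vq) -                                   \
			 offsetof(struct vring_virtqueue, vq)))

static int is_broken(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq); /* stays const */

	return vq->broken;
}

int main(void)
{
	struct vring_virtqueue v = { .vq = { .index = 0 }, .broken = 0 };

	printf("%d\n", is_broken(&v.vq));
	return 0;
}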
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index d7f5af62ddaa..eb6aee8c06b2 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
+#include <linux/group_cpus.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
@@ -112,6 +113,17 @@ static bool virtio_vdpa_notify(struct virtqueue *vq)
return true;
}
+static bool virtio_vdpa_notify_with_data(struct virtqueue *vq)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 data = vring_notification_data(vq);
+
+ ops->kick_vq_with_data(vdpa, data);
+
+ return true;
+}
+
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
struct virtio_vdpa_device *vd_dev = private;
@@ -138,6 +150,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
struct device *dma_dev;
const struct vdpa_config_ops *ops = vdpa->config;
struct virtio_vdpa_vq_info *info;
+ bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
struct vdpa_callback cb;
struct virtqueue *vq;
u64 desc_addr, driver_addr, device_addr;
@@ -154,6 +167,14 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (index >= vdpa->nvqs)
return ERR_PTR(-ENOENT);
+ /* We cannot accept VIRTIO_F_NOTIFICATION_DATA without kick_vq_with_data */
+ if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
+ if (ops->kick_vq_with_data)
+ notify = virtio_vdpa_notify_with_data;
+ else
+ __virtio_clear_bit(vdev, VIRTIO_F_NOTIFICATION_DATA);
+ }
+
/* Queue shouldn't already be set up. */
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);
@@ -183,8 +204,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
dma_dev = vdpa_get_dma_dev(vdpa);
vq = vring_create_virtqueue_dma(index, max_num, align, vdev,
true, may_reduce_num, ctx,
- virtio_vdpa_notify, callback,
- name, dma_dev);
+ notify, callback, name, dma_dev);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
@@ -195,6 +215,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
+ cb.trigger = NULL;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
@@ -272,6 +293,66 @@ static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
virtio_vdpa_del_vq(vq);
}
+static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+{
+ affd->nr_sets = 1;
+ affd->set_size[0] = affvecs;
+}
+
+static struct cpumask *
+create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+{
+ unsigned int affvecs = 0, curvec, usedvecs, i;
+ struct cpumask *masks = NULL;
+
+ if (nvecs > affd->pre_vectors + affd->post_vectors)
+ affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+
+ if (!affd->calc_sets)
+ affd->calc_sets = default_calc_sets;
+
+ affd->calc_sets(affd, affvecs);
+
+ if (!affvecs)
+ return NULL;
+
+ masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ return NULL;
+
+ /* Fill out vectors at the beginning that don't need affinity */
+ for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+ cpumask_setall(&masks[curvec]);
+
+ for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
+ unsigned int this_vecs = affd->set_size[i];
+ int j;
+ struct cpumask *result = group_cpus_evenly(this_vecs);
+
+ if (!result) {
+ kfree(masks);
+ return NULL;
+ }
+
+ for (j = 0; j < this_vecs; j++)
+ cpumask_copy(&masks[curvec + j], &result[j]);
+ kfree(result);
+
+ curvec += this_vecs;
+ usedvecs += this_vecs;
+ }
+
+ /* Fill out vectors at the end that don't need affinity */
+ if (usedvecs >= affvecs)
+ curvec = affd->pre_vectors + affvecs;
+ else
+ curvec = affd->pre_vectors + usedvecs;
+ for (; curvec < nvecs; curvec++)
+ cpumask_setall(&masks[curvec]);
+
+ return masks;
+}
+
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
@@ -282,9 +363,15 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
+ struct irq_affinity default_affd = { 0 };
+ struct cpumask *masks;
struct vdpa_callback cb;
int i, err, queue_idx = 0;
+ masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
+ if (!masks)
+ return -ENOMEM;
+
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
@@ -298,6 +385,7 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
}
+ ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
@@ -337,6 +425,32 @@ static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
return dev_name(&vdpa->dev);
}
+static int virtio_vdpa_set_vq_affinity(struct virtqueue *vq,
+ const struct cpumask *cpu_mask)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
+ struct vdpa_device *vdpa = vd_dev->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ unsigned int index = vq->index;
+
+ if (ops->set_vq_affinity)
+ return ops->set_vq_affinity(vdpa, index, cpu_mask);
+
+ return 0;
+}
+
+static const struct cpumask *
+virtio_vdpa_get_vq_affinity(struct virtio_device *vdev, int index)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_vq_affinity)
+ return ops->get_vq_affinity(vdpa, index);
+
+ return NULL;
+}
+
static const struct virtio_config_ops virtio_vdpa_config_ops = {
.get = virtio_vdpa_get,
.set = virtio_vdpa_set,
@@ -349,6 +463,8 @@ static const struct virtio_config_ops virtio_vdpa_config_ops = {
.get_features = virtio_vdpa_get_features,
.finalize_features = virtio_vdpa_finalize_features,
.bus_name = virtio_vdpa_bus_name,
+ .set_vq_affinity = virtio_vdpa_set_vq_affinity,
+ .get_vq_affinity = virtio_vdpa_get_vq_affinity,
};
static void virtio_vdpa_release_dev(struct device *_d)
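
Finally, virtio-vdpa gains automatic virtqueue affinity: create_affinity_masks(), adapted from the managed-IRQ spreading code, uses group_cpus_evenly() to hand each queue a near-equal share of CPUs, and the new set_vq_affinity/get_vq_affinity config ops forward per-queue affinity to the parent vdpa driver. A toy sketch of the even-spread idea, ignoring the NUMA awareness and pre/post-vector bookkeeping the real function handles:

#include <stdio.h>

int main(void)
{
	const unsigned int nvqs = 5, ncpus = 8;
	unsigned int vq, cpu = 0;

	for (vq = 0; vq < nvqs; vq++) {
		/* Each queue gets a contiguous, near-equal slice of CPUs. */
		unsigned int share = ncpus / nvqs + (vq < ncpus % nvqs ? 1 : 0);

		printf("vq %u -> cpus %u-%u\n", vq, cpu, cpu + share - 1);
		cpu += share;
	}
	return 0;
}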