Diffstat (limited to 'drivers/virtio/virtio_vdpa.c')
-rw-r--r--  drivers/virtio/virtio_vdpa.c | 113
1 file changed, 45 insertions(+), 68 deletions(-)
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 989e2d7184ce..0a801f67b599 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -28,19 +28,6 @@ struct virtio_vdpa_device {
struct virtio_device vdev;
struct vdpa_device *vdpa;
u64 features;
-
- /* The lock to protect virtqueue list */
- spinlock_t lock;
- /* List of virtio_vdpa_vq_info */
- struct list_head virtqueues;
-};
-
-struct virtio_vdpa_vq_info {
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
};
static inline struct virtio_vdpa_device *
@@ -93,14 +80,14 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- return vdpa_set_status(vdpa, status);
+ vdpa_set_status(vdpa, status);
}
static void virtio_vdpa_reset(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- vdpa_reset(vdpa);
+ vdpa_reset(vdpa, 0);
}
static bool virtio_vdpa_notify(struct virtqueue *vq)
@@ -135,9 +122,9 @@ static irqreturn_t virtio_vdpa_config_cb(void *private)
static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
- struct virtio_vdpa_vq_info *info = private;
+ struct virtqueue *vq = private;
- return vring_interrupt(0, info->vq);
+ return vring_interrupt(0, vq);
}
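
With the bookkeeping struct gone, the virtqueue pointer itself is the interrupt cookie registered through ops->set_vq_cb(). A minimal sketch of the resulting delivery path, assuming a hypothetical parent-driver IRQ handler that keeps the struct vdpa_callback it was given:

#include <linux/interrupt.h>
#include <linux/vdpa.h>

/* Sketch: the parent vDPA driver fires the registered callback;
 * cb->private is now the struct virtqueue itself, so no per-device
 * list lookup is needed to reach vring_interrupt().
 */
static irqreturn_t example_vq_irq(int irq, void *opaque)
{
	struct vdpa_callback *cb = opaque;	/* hypothetical storage */

	if (!cb->callback)
		return IRQ_NONE;
	return cb->callback(cb->private);	/* -> virtio_vdpa_virtqueue_cb(vq) */
}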
static struct virtqueue *
@@ -145,18 +132,15 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name, bool ctx)
{
- struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- struct device *dma_dev;
const struct vdpa_config_ops *ops = vdpa->config;
- struct virtio_vdpa_vq_info *info;
bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
struct vdpa_callback cb;
struct virtqueue *vq;
u64 desc_addr, driver_addr, device_addr;
+ union virtio_map map = {0};
/* Assume split virtqueue, switch to packed if necessary */
struct vdpa_vq_state state = {0};
- unsigned long flags;
u32 align, max_num, min_num = 1;
bool may_reduce_num = true;
int err;
@@ -179,12 +163,11 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);
- /* Allocate and fill out our active queue description */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
+ if (ops->get_vq_size)
+ max_num = ops->get_vq_size(vdpa, index);
+ else
+ max_num = ops->get_vq_num_max(vdpa);
- max_num = ops->get_vq_num_max(vdpa);
if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
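
The maximum ring size now prefers the optional per-queue get_vq_size op and only falls back to the device-wide get_vq_num_max. The same selection as a standalone helper, a sketch using only the two ops visible above:

#include <linux/vdpa.h>

/* Sketch: largest usable ring size for one virtqueue; per-queue
 * sizing wins over the older device-wide bound when available.
 */
static u32 example_max_ring_size(struct vdpa_device *vdpa, u16 index)
{
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_vq_size)
		return ops->get_vq_size(vdpa, index);
	return ops->get_vq_num_max(vdpa);
}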
@@ -193,28 +176,32 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
- may_reduce_num = (max_num == min_num) ? false : true;
+ may_reduce_num = (max_num != min_num);
/* Create the vring */
align = ops->get_vq_align(vdpa);
- if (ops->get_vq_dma_dev)
- dma_dev = ops->get_vq_dma_dev(vdpa, index);
+ if (ops->get_vq_map)
+ map = ops->get_vq_map(vdpa, index);
else
- dma_dev = vdpa_get_dma_dev(vdpa);
- vq = vring_create_virtqueue_dma(index, max_num, align, vdev,
+ map = vdpa_get_map(vdpa);
+
+ vq = vring_create_virtqueue_map(index, max_num, align, vdev,
true, may_reduce_num, ctx,
- notify, callback, name, dma_dev);
+ notify, callback, name, map);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
}
+ if (index == 0)
+ vdev->vmap = map;
+
vq->num_max = max_num;
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
- cb.private = info;
+ cb.private = vq;
cb.trigger = NULL;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
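
Ring allocation now passes an opaque union virtio_map instead of a bare DMA device, so buffer mapping can go through either the DMA API or the parent's map ops. A sketch of the per-queue resolution, using only the helpers shown in this hunk:

#include <linux/vdpa.h>

/* Sketch: resolve the mapping token handed to
 * vring_create_virtqueue_map(); a per-vq map, when provided,
 * takes precedence over the device-wide one.
 */
static union virtio_map example_vq_map(struct vdpa_device *vdpa, u16 index)
{
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_vq_map)
		return ops->get_vq_map(vdpa, index);
	return vdpa_get_map(vdpa);
}

The hunk also caches the first queue's map in vdev->vmap, presumably so the virtio core has a device-level mapping token to use outside vq setup.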
@@ -245,13 +232,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
ops->set_vq_ready(vdpa, index, 1);
- vq->priv = info;
- info->vq = vq;
-
- spin_lock_irqsave(&vd_dev->lock, flags);
- list_add(&info->node, &vd_dev->virtqueues);
- spin_unlock_irqrestore(&vd_dev->lock, flags);
-
return vq;
err_vq:
@@ -260,7 +240,6 @@ error_new_virtqueue:
ops->set_vq_ready(vdpa, index, 0);
/* VDPA driver should make sure vq is stopped here */
WARN_ON(ops->get_vq_ready(vdpa, index));
- kfree(info);
return ERR_PTR(err);
}
@@ -269,20 +248,12 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
struct vdpa_device *vdpa = vd_dev->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
- struct virtio_vdpa_vq_info *info = vq->priv;
unsigned int index = vq->index;
- unsigned long flags;
-
- spin_lock_irqsave(&vd_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vd_dev->lock, flags);
/* Select and deactivate the queue (best effort) */
ops->set_vq_ready(vdpa, index, 0);
vring_del_virtqueue(vq);
-
- kfree(info);
}
static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
@@ -326,20 +297,21 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
unsigned int this_vecs = affd->set_size[i];
+ unsigned int nr_masks;
int j;
- struct cpumask *result = group_cpus_evenly(this_vecs);
+ struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
if (!result) {
kfree(masks);
return NULL;
}
- for (j = 0; j < this_vecs; j++)
+ for (j = 0; j < nr_masks; j++)
cpumask_copy(&masks[curvec + j], &result[j]);
kfree(result);
- curvec += this_vecs;
- usedvecs += this_vecs;
+ curvec += nr_masks;
+ usedvecs += nr_masks;
}
/* Fill out vectors at the end that don't need affinity */
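
group_cpus_evenly() now reports how many masks it actually built, and the loop advances by that count rather than by the requested set size. The distinction matters when CPUs are scarce: asking for a set of 4 on a 2-CPU machine yields nr_masks == 2, and copying this_vecs entries would read past the returned array. A condensed sketch of the corrected pattern:

#include <linux/group_cpus.h>

/* Sketch: consume only the masks that were actually produced. */
unsigned int nr_masks;
struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);

if (result) {
	for (j = 0; j < nr_masks; j++)		/* not this_vecs */
		cpumask_copy(&masks[curvec + j], &result[j]);
	kfree(result);
	curvec += nr_masks;
	usedvecs += nr_masks;
}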
@@ -355,49 +327,54 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
- struct irq_affinity default_affd = { 0 };
struct cpumask *masks;
struct vdpa_callback cb;
+ bool has_affinity = desc && ops->set_vq_affinity;
int i, err, queue_idx = 0;
- masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
- if (!masks)
- return -ENOMEM;
+ if (has_affinity) {
+ masks = create_affinity_masks(nvqs, desc);
+ if (!masks)
+ return -ENOMEM;
+ }
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
- callbacks[i], names[i], ctx ?
- ctx[i] : false);
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
}
- if (ops->set_vq_affinity)
+ if (has_affinity)
ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
cb.private = vd_dev;
ops->set_config_cb(vdpa, &cb);
+ if (has_affinity)
+ kfree(masks);
return 0;
err_setup_vq:
virtio_vdpa_del_vqs(vdev);
+ if (has_affinity)
+ kfree(masks);
return err;
}
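
find_vqs() now takes one struct virtqueue_info array in place of the parallel callbacks[]/names[]/ctx[] arrays. A hedged usage sketch from a consumer driver's point of view; the queue names and callbacks are illustrative, not part of this patch:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void rx_done(struct virtqueue *vq);	/* hypothetical callbacks */
static void tx_done(struct virtqueue *vq);

/* Sketch: a two-queue driver describing its queues in one array. */
static int example_init_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
	struct virtqueue_info vqs_info[] = {
		{ "rx", rx_done },	/* .ctx defaults to false */
		{ "tx", tx_done },
	};

	return virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
}

Affinity handling is also narrowed: the masks are only allocated when both an irq_affinity descriptor is supplied and the parent implements set_vq_affinity, and, as the added kfree() calls show, they are now freed on both the success and error paths.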
@@ -489,12 +466,12 @@ static int virtio_vdpa_probe(struct vdpa_device *vdpa)
if (!vd_dev)
return -ENOMEM;
- vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
+ vd_dev->vdev.dev.parent = vdpa->map ? &vdpa->dev :
+ vdpa_get_map(vdpa).dma_dev;
vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
vd_dev->vdev.config = &virtio_vdpa_config_ops;
+ vd_dev->vdev.map = vdpa->map;
vd_dev->vdpa = vdpa;
- INIT_LIST_HEAD(&vd_dev->virtqueues);
- spin_lock_init(&vd_dev->lock);
vd_dev->vdev.id.device = ops->get_device_id(vdpa);
if (vd_dev->vdev.id.device == 0)
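
Finally, the probe path chooses the new virtio device's parent based on how the vDPA device does mapping. A sketch of that decision, assuming the dma_dev member of union virtio_map used above:

#include <linux/vdpa.h>

/* Sketch: a non-NULL vdpa->map means mapping goes through the map
 * ops rather than the DMA API, so there is no DMA device to parent
 * to and the vDPA device itself is used instead.
 */
static struct device *example_parent_dev(struct vdpa_device *vdpa)
{
	if (vdpa->map)
		return &vdpa->dev;
	return vdpa_get_map(vdpa).dma_dev;
}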