Diffstat (limited to 'drivers/vhost/vdpa.c')
-rw-r--r--  drivers/vhost/vdpa.c  |  313
1 file changed, 262 insertions(+), 51 deletions(-)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index ec32f785dfde..05a481e4c385 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -59,6 +59,7 @@ struct vhost_vdpa {
int in_batch;
struct vdpa_iova_range range;
u32 batch_asid;
+ bool suspended;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -131,6 +132,15 @@ static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
return vhost_vdpa_alloc_as(v, asid);
}
+static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->reset_map)
+ ops->reset_map(vdpa, asid);
+}
+
static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
struct vhost_vdpa_as *as = asid_to_as(v, asid);
@@ -140,6 +150,14 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
hlist_del(&as->hash_link);
vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
+ /*
+ * Devices with vendor specific IOMMU may need to restore
+ * iotlb to the initial or default state, which cannot be
+ * cleaned up in the all range unmap call above. Give them
+ * a chance to clean up or reset the map to the desired
+ * state.
+ */
+ vhost_vdpa_reset_map(v, asid);
kfree(as);
return 0;
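
For reference, a parent driver with a vendor-specific on-chip IOMMU could back the new reset_map callback roughly as below. This is only an illustrative sketch, not part of the patch: struct my_vdpa_dev and the my_vdpa_*() helpers are hypothetical placeholders for vendor code.

#include <linux/vdpa.h>

/* Hypothetical parent-driver state; a real driver carries much more. */
struct my_vdpa_dev {
	struct vdpa_device vdpa;
	u32 nas;			/* number of address spaces */
};

/*
 * .reset_map callback: put one address space of the on-chip IOMMU back
 * to its power-on default so the next vhost-vdpa owner starts from a
 * clean, well-defined mapping rather than whatever was left behind.
 */
static int my_vdpa_reset_map(struct vdpa_device *vdpa, unsigned int asid)
{
	struct my_vdpa_dev *dev = container_of(vdpa, struct my_vdpa_dev, vdpa);

	if (asid >= dev->nas)
		return -EINVAL;

	my_vdpa_flush_user_mappings(dev, asid);	/* drop userspace-installed maps */
	my_vdpa_install_default_map(dev, asid);	/* e.g. 1:1 passthrough default */
	return 0;
}
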
@@ -161,7 +179,7 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
if (call_ctx)
- eventfd_signal(call_ctx, 1);
+ eventfd_signal(call_ctx);
return IRQ_HANDLED;
}
@@ -172,7 +190,7 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
struct eventfd_ctx *config_ctx = v->config_ctx;
if (config_ctx)
- eventfd_signal(config_ctx, 1);
+ eventfd_signal(config_ctx);
return IRQ_HANDLED;
}
@@ -191,16 +209,14 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (irq < 0)
return;
- irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx)
return;
- vq->call_ctx.producer.token = vq->call_ctx.ctx;
- vq->call_ctx.producer.irq = irq;
- ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer,
+ vq->call_ctx.ctx, irq);
if (unlikely(ret))
- dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
- qid, vq->call_ctx.producer.token, ret);
+ dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration fails, ret = %d\n",
+ qid, vq->call_ctx.ctx, ret);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
@@ -210,13 +226,48 @@ static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
-static int vhost_vdpa_reset(struct vhost_vdpa *v)
+static int _compat_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
+ u32 flags = 0;
+
+ v->suspended = false;
+
+ if (v->vdev.vqs) {
+ flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
+ VHOST_BACKEND_F_IOTLB_PERSIST) ?
+ VDPA_RESET_F_CLEAN_MAP : 0;
+ }
+
+ return vdpa_reset(vdpa, flags);
+}
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
+{
v->in_batch = 0;
+ return _compat_vdpa_reset(v);
+}
+
+static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!vdpa->use_va || !ops->bind_mm)
+ return 0;
+
+ return ops->bind_mm(vdpa, v->vdev.mm);
+}
+
+static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!vdpa->use_va || !ops->unbind_mm)
+ return;
- return vdpa_reset(vdpa);
+ ops->unbind_mm(vdpa);
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
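
On the parent-driver side, the VDPA_RESET_F_CLEAN_MAP flag passed down by _compat_vdpa_reset() could be honoured roughly as in the hypothetical sketch below (the my_vdpa_* names are made up): when userspace has not acked VHOST_BACKEND_F_IOTLB_PERSIST, the mappings are wiped together with the device state to keep the historical behaviour.

/*
 * Hypothetical compat reset in a parent driver: device state is always
 * cleared, the on-chip IOTLB only when the flag asks for it.
 */
static int my_vdpa_compat_reset(struct vdpa_device *vdpa, u32 flags)
{
	struct my_vdpa_dev *dev = container_of(vdpa, struct my_vdpa_dev, vdpa);

	my_vdpa_stop_datapath(dev);
	my_vdpa_clear_device_state(dev);

	if (flags & VDPA_RESET_F_CLEAN_MAP)
		my_vdpa_wipe_iotlb(dev);	/* pre-IOTLB_PERSIST behaviour */

	return 0;
}
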
@@ -273,7 +324,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
vhost_vdpa_unsetup_vq_irq(v, i);
if (status == 0) {
- ret = vdpa_reset(vdpa);
+ ret = _compat_vdpa_reset(v);
if (ret)
return ret;
} else
@@ -359,6 +410,22 @@ static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
return ops->suspend;
}
+static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->resume;
+}
+
+static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_vq_desc_group;
+}
+
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -373,11 +440,34 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
return 0;
}
+static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->get_backend_features)
+ return 0;
+ else
+ return ops->get_backend_features(vdpa);
+}
+
+static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
+ vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
+}
+
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_dev *d = &v->vdev;
+ u64 actual_features;
u64 features;
+ int i;
/*
* It's not allowed to change the features after they have
@@ -392,6 +482,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (vdpa_set_features(vdpa, features))
return -EINVAL;
+ /* let the vqs know what has been configured */
+ actual_features = ops->get_driver_features(vdpa);
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->acked_features = actual_features;
+ mutex_unlock(&vq->mutex);
+ }
+
return 0;
}
@@ -491,11 +591,42 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ int ret;
+
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
if (!ops->suspend)
return -EOPNOTSUPP;
- return ops->suspend(vdpa);
+ ret = ops->suspend(vdpa);
+ if (!ret)
+ v->suspended = true;
+
+ return ret;
+}
+
+/* After a successful return of this ioctl the device resumes processing
+ * virtqueue descriptors. The device becomes fully operational the same way it
+ * was before it was suspended.
+ */
+static long vhost_vdpa_resume(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int ret;
+
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
+ if (!ops->resume)
+ return -EOPNOTSUPP;
+
+ ret = ops->resume(vdpa);
+ if (!ret)
+ v->suspended = false;
+
+ return ret;
}
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
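
From userspace the two ioctls pair up as in the sketch below, assuming a vhost-vdpa fd and uapi headers that define VHOST_VDPA_SUSPEND/VHOST_VDPA_RESUME; error handling is intentionally minimal.

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Pause the datapath, inspect or save ring state, then resume it. */
static int pause_and_resume(int vdpa_fd)
{
	if (ioctl(vdpa_fd, VHOST_VDPA_SUSPEND) < 0)
		return -1;	/* e.g. EOPNOTSUPP when ops->suspend is missing */

	/* While suspended, VHOST_GET_VRING_BASE and friends return a stable view. */

	if (ioctl(vdpa_fd, VHOST_VDPA_RESUME) < 0)
		return -1;
	return 0;
}
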
@@ -536,6 +667,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
else if (copy_to_user(argp, &s, sizeof(s)))
return -EFAULT;
return 0;
+ case VHOST_VDPA_GET_VRING_DESC_GROUP:
+ if (!vhost_vdpa_has_desc_group(v))
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_desc_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
case VHOST_VDPA_SET_GROUP_ASID:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
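
Userspace (QEMU-style shadow-virtqueue setups) is expected to combine the new ioctl with VHOST_VDPA_SET_GROUP_ASID roughly as sketched below; this assumes VHOST_BACKEND_F_DESC_ASID and VHOST_BACKEND_F_IOTLB_ASID were acked and that the uapi headers define these ioctls.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Find the group backing vq 0's descriptor table and move it to @asid. */
static int isolate_desc_group(int vdpa_fd, uint32_t asid)
{
	struct vhost_vring_state s = { .index = 0 };

	if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &s) < 0)
		return -1;

	s.index = s.num;	/* group id comes back in .num */
	s.num = asid;
	return ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &s);
}
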
@@ -544,12 +685,34 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (!ops->set_group_asid)
return -EOPNOTSUPP;
return ops->set_group_asid(vdpa, idx, s.num);
+ case VHOST_VDPA_GET_VRING_SIZE:
+ if (!ops->get_vq_size)
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_size(vdpa, idx);
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
case VHOST_GET_VRING_BASE:
r = ops->get_vq_state(v->vdpa, idx, &vq_state);
if (r)
return r;
- vq->last_avail_idx = vq_state.split.avail_index;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = vq_state.packed.last_avail_idx |
+ (vq_state.packed.last_avail_counter << 15);
+ vq->last_used_idx = vq_state.packed.last_used_idx |
+ (vq_state.packed.last_used_counter << 15);
+ } else {
+ vq->last_avail_idx = vq_state.split.avail_index;
+ }
+ break;
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_unsetup_vq_irq(v, idx);
+ }
break;
}
@@ -559,6 +722,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
switch (cmd) {
case VHOST_SET_VRING_ADDR:
+ if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+ return -EINVAL;
+
if (ops->set_vq_address(vdpa, idx,
(u64)(uintptr_t)vq->desc,
(u64)(uintptr_t)vq->avail,
@@ -567,21 +733,34 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- vq_state.split.avail_index = vq->last_avail_idx;
- if (ops->set_vq_state(vdpa, idx, &vq_state))
- r = -EINVAL;
+ if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+ return -EINVAL;
+
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
+ vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
+ vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
+ vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
+ } else {
+ vq_state.split.avail_index = vq->last_avail_idx;
+ }
+ r = ops->set_vq_state(vdpa, idx, &vq_state);
break;
case VHOST_SET_VRING_CALL:
if (vq->call_ctx.ctx) {
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
+ cb.trigger = vq->call_ctx.ctx;
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_setup_vq_irq(v, idx);
} else {
cb.callback = NULL;
cb.private = NULL;
+ cb.trigger = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
- vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
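
Both directions above use the usual vhost convention for packed virtqueues: bits 0..14 of the 16-bit index carry the ring position and bit 15 carries the wrap counter. A minimal standalone sketch of that encoding:

#include <assert.h>
#include <stdint.h>

static inline uint16_t packed_state_encode(uint16_t idx, unsigned int wrap)
{
	return (idx & 0x7fff) | ((wrap & 1) << 15);	/* wrap counter in bit 15 */
}

static inline void packed_state_decode(uint16_t v, uint16_t *idx, unsigned int *wrap)
{
	*idx = v & 0x7fff;
	*wrap = !!(v & 0x8000);
}

int main(void)
{
	uint16_t idx;
	unsigned int wrap;

	packed_state_decode(packed_state_encode(0x1234, 1), &idx, &wrap);
	assert(idx == 0x1234 && wrap == 1);
	return 0;
}
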
@@ -606,11 +785,27 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
- BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
+ BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
+ BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
+ BIT_ULL(VHOST_BACKEND_F_RESUME) |
+ BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
!vhost_vdpa_can_suspend(v))
return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
+ !vhost_vdpa_can_resume(v))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+ !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
+ return -EINVAL;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+ !vhost_vdpa_has_desc_group(v))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
+ !vhost_vdpa_has_persistent_map(v))
+ return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
}
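
A userspace sketch of the corresponding negotiation, assuming uapi headers new enough to define the listed VHOST_BACKEND_F_* bits: only bits offered by VHOST_GET_BACKEND_FEATURES are acked, and DESC_ASID is dropped unless IOTLB_ASID is also requested, mirroring the checks above.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

static int negotiate_backend_features(int vdpa_fd, uint64_t wanted)
{
	uint64_t offered;

	if (ioctl(vdpa_fd, VHOST_GET_BACKEND_FEATURES, &offered) < 0)
		return -1;

	wanted &= offered;
	/* DESC_ASID is only meaningful together with IOTLB_ASID. */
	if (!(wanted & (1ULL << VHOST_BACKEND_F_IOTLB_ASID)))
		wanted &= ~(1ULL << VHOST_BACKEND_F_DESC_ASID);

	return ioctl(vdpa_fd, VHOST_SET_BACKEND_FEATURES, &wanted);
}
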
@@ -662,6 +857,13 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
features = VHOST_VDPA_BACKEND_FEATURES;
if (vhost_vdpa_can_suspend(v))
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
+ if (vhost_vdpa_can_resume(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+ if (vhost_vdpa_has_desc_group(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
+ if (vhost_vdpa_has_persistent_map(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
+ features |= vhost_vdpa_get_backend_features(v);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
@@ -677,6 +879,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_SUSPEND:
r = vhost_vdpa_suspend(v);
break;
+ case VHOST_VDPA_RESUME:
+ r = vhost_vdpa_resume(v);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -684,6 +889,17 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
}
+ if (r)
+ goto out;
+
+ switch (cmd) {
+ case VHOST_SET_OWNER:
+ r = vhost_vdpa_bind_mm(v);
+ if (r)
+ vhost_dev_reset_owner(d, NULL);
+ break;
+ }
+out:
mutex_unlock(&d->mutex);
return r;
}
@@ -792,7 +1008,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
r = ops->set_map(vdpa, asid, iotlb);
} else {
r = iommu_map(v->domain, iova, pa, size,
- perm_to_iommu_flags(perm));
+ perm_to_iommu_flags(perm),
+ GFP_KERNEL_ACCOUNT);
}
if (r) {
vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
@@ -819,11 +1036,7 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
if (!v->in_batch)
ops->set_map(vdpa, asid, iotlb);
}
- /* If we are in the middle of batch processing, delay the free
- * of AS until BATCH_END.
- */
- if (!v->in_batch && !iotlb->nmaps)
- vhost_vdpa_remove_as(v, asid);
+
}
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
@@ -920,7 +1133,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
while (npages) {
sz2pin = min_t(unsigned long, npages, list_size);
pinned = pin_user_pages(cur_base, sz2pin,
- gup_flags, page_list, NULL);
+ gup_flags, page_list);
if (sz2pin != pinned) {
if (pinned < 0) {
ret = pinned;
@@ -1080,8 +1293,6 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
if (v->in_batch && ops->set_map)
ops->set_map(vdpa, asid, iotlb);
v->in_batch = false;
- if (!iotlb->nmaps)
- vhost_vdpa_remove_as(v, asid);
break;
default:
r = -EINVAL;
@@ -1107,24 +1318,26 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
- struct device *dma_dev = vdpa_get_dma_dev(vdpa);
- struct bus_type *bus;
+ union virtio_map map = vdpa_get_map(vdpa);
+ struct device *dma_dev = map.dma_dev;
int ret;
/* Device want to do DMA by itself */
if (ops->set_map || ops->dma_map)
return 0;
- bus = dma_dev->bus;
- if (!bus)
- return -EFAULT;
-
- if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
+ dev_warn_once(&v->dev,
+ "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
return -ENOTSUPP;
+ }
- v->domain = iommu_domain_alloc(bus);
- if (!v->domain)
- return -EIO;
+ v->domain = iommu_paging_domain_alloc(dma_dev);
+ if (IS_ERR(v->domain)) {
+ ret = PTR_ERR(v->domain);
+ v->domain = NULL;
+ return ret;
+ }
ret = iommu_attach_device(v->domain, dma_dev);
if (ret)
@@ -1134,13 +1347,15 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
err_attach:
iommu_domain_free(v->domain);
+ v->domain = NULL;
return ret;
}
static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
- struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+ union virtio_map map = vdpa_get_map(vdpa);
+ struct device *dma_dev = map.dma_dev;
if (v->domain) {
iommu_detach_device(v->domain, dma_dev);
@@ -1178,8 +1393,10 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
vhost_vdpa_remove_as(v, asid);
}
+ vhost_vdpa_free_domain(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
+ v->vdev.vqs = NULL;
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
@@ -1211,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
+ vqs[i]->call_ctx.ctx = NULL;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
@@ -1250,7 +1468,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
- vhost_vdpa_free_domain(v);
+ vhost_vdpa_unbind_mm(v);
vhost_vdpa_config_put(v);
vhost_vdpa_cleanup(v);
mutex_unlock(&d->mutex);
@@ -1273,13 +1491,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
notify = ops->get_vq_notification(vdpa, index);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
- PFN_DOWN(notify.addr), PAGE_SIZE,
- vma->vm_page_prot))
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
+ return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
}
static const struct vm_operations_struct vhost_vdpa_vm_ops = {
@@ -1315,7 +1527,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
@@ -1338,7 +1550,7 @@ static void vhost_vdpa_release_dev(struct device *device)
struct vhost_vdpa *v =
container_of(device, struct vhost_vdpa, dev);
- ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ ida_free(&vhost_vdpa_ida, v->minor);
kfree(v->vqs);
kfree(v);
}
@@ -1361,8 +1573,8 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
if (!v)
return -ENOMEM;
- minor = ida_simple_get(&vhost_vdpa_ida, 0,
- VHOST_VDPA_DEV_MAX, GFP_KERNEL);
+ minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1,
+ GFP_KERNEL);
if (minor < 0) {
kfree(v);
return minor;
@@ -1406,7 +1618,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
err:
put_device(&v->dev);
- ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}