Diffstat (limited to 'drivers/misc/uacce/uacce.c')
-rw-r--r--	drivers/misc/uacce/uacce.c	279
1 file changed, 168 insertions(+), 111 deletions(-)
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 488eeb2811ae..42e7d2a2a90c 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -7,45 +7,43 @@
 #include <linux/slab.h>
 #include <linux/uacce.h>
 
-static struct class *uacce_class;
 static dev_t uacce_devt;
-static DEFINE_MUTEX(uacce_mutex);
 static DEFINE_XARRAY_ALLOC(uacce_xa);
 
-static int uacce_start_queue(struct uacce_queue *q)
+static const struct class uacce_class = {
+	.name = UACCE_NAME,
+};
+
+/*
+ * If the parent driver or the device disappears, the queue state is invalid and
+ * ops are not usable anymore.
+ */
+static bool uacce_queue_is_valid(struct uacce_queue *q)
 {
-	int ret = 0;
+	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
+}
 
-	mutex_lock(&uacce_mutex);
+static int uacce_start_queue(struct uacce_queue *q)
+{
+	int ret;
 
-	if (q->state != UACCE_Q_INIT) {
-		ret = -EINVAL;
-		goto out_with_lock;
-	}
+	if (q->state != UACCE_Q_INIT)
+		return -EINVAL;
 
 	if (q->uacce->ops->start_queue) {
 		ret = q->uacce->ops->start_queue(q);
 		if (ret < 0)
-			goto out_with_lock;
+			return ret;
 	}
 
 	q->state = UACCE_Q_STARTED;
-
-out_with_lock:
-	mutex_unlock(&uacce_mutex);
-
-	return ret;
+	return 0;
 }
 
 static int uacce_put_queue(struct uacce_queue *q)
 {
 	struct uacce_device *uacce = q->uacce;
 
-	mutex_lock(&uacce_mutex);
-
-	if (q->state == UACCE_Q_ZOMBIE)
-		goto out;
-
 	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
 		uacce->ops->stop_queue(q);
 
@@ -54,8 +52,6 @@ static int uacce_put_queue(struct uacce_queue *q)
 		uacce->ops->put_queue(q);
 
 	q->state = UACCE_Q_ZOMBIE;
-out:
-	mutex_unlock(&uacce_mutex);
 
 	return 0;
 }
@@ -65,20 +61,36 @@ static long uacce_fops_unl_ioctl(struct file *filep,
 {
 	struct uacce_queue *q = filep->private_data;
 	struct uacce_device *uacce = q->uacce;
+	long ret = -ENXIO;
+
+	/*
+	 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
+	 * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
+	 * gets called with mmap_lock held, by taking uacce->mutex instead of
+	 * q->mutex. Doing this in uacce_fops_mmap() is not possible because
+	 * uacce_fops_open() calls iommu_sva_bind_device(), which takes
+	 * mmap_lock, while holding uacce->mutex.
+	 */
+	mutex_lock(&uacce->mutex);
+	if (!uacce_queue_is_valid(q))
+		goto out_unlock;
 
 	switch (cmd) {
 	case UACCE_CMD_START_Q:
-		return uacce_start_queue(q);
-
+		ret = uacce_start_queue(q);
+		break;
 	case UACCE_CMD_PUT_Q:
-		return uacce_put_queue(q);
-
+		ret = uacce_put_queue(q);
+		break;
 	default:
-		if (!uacce->ops->ioctl)
-			return -EINVAL;
-
-		return uacce->ops->ioctl(q, cmd, arg);
+		if (uacce->ops->ioctl)
+			ret = uacce->ops->ioctl(q, cmd, arg);
+		else
+			ret = -EINVAL;
 	}
+out_unlock:
+	mutex_unlock(&uacce->mutex);
+	return ret;
 }
 
 #ifdef CONFIG_COMPAT
@@ -99,7 +111,7 @@ static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
 	if (!(uacce->flags & UACCE_DEV_SVA))
 		return 0;
 
-	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+	handle = iommu_sva_bind_device(uacce->parent, current->mm);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 
@@ -136,6 +148,13 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	if (!q)
 		return -ENOMEM;
 
+	mutex_lock(&uacce->mutex);
+
+	if (!uacce->parent) {
+		ret = -EINVAL;
+		goto out_with_mem;
+	}
+
 	ret = uacce_bind_queue(uacce, q);
 	if (ret)
 		goto out_with_mem;
@@ -150,12 +169,11 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	init_waitqueue_head(&q->wait);
 	filep->private_data = q;
-	uacce->inode = inode;
 	q->state = UACCE_Q_INIT;
-
-	mutex_lock(&uacce->queues_lock);
+	q->mapping = filep->f_mapping;
+	mutex_init(&q->mutex);
 	list_add(&q->list, &uacce->queues);
-	mutex_unlock(&uacce->queues_lock);
+	mutex_unlock(&uacce->mutex);
 
 	return 0;
 
@@ -163,18 +181,20 @@ out_with_bond:
 	uacce_unbind_queue(q);
 out_with_mem:
 	kfree(q);
+	mutex_unlock(&uacce->mutex);
 	return ret;
 }
 
 static int uacce_fops_release(struct inode *inode, struct file *filep)
 {
 	struct uacce_queue *q = filep->private_data;
+	struct uacce_device *uacce = q->uacce;
 
-	mutex_lock(&q->uacce->queues_lock);
-	list_del(&q->list);
-	mutex_unlock(&q->uacce->queues_lock);
+	mutex_lock(&uacce->mutex);
 	uacce_put_queue(q);
 	uacce_unbind_queue(q);
+	list_del(&q->list);
+	mutex_unlock(&uacce->mutex);
 	kfree(q);
 
 	return 0;
@@ -183,12 +203,15 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
 static void uacce_vma_close(struct vm_area_struct *vma)
 {
 	struct uacce_queue *q = vma->vm_private_data;
-	struct uacce_qfile_region *qfr = NULL;
 
-	if (vma->vm_pgoff < UACCE_MAX_REGION)
-		qfr = q->qfrs[vma->vm_pgoff];
+	if (vma->vm_pgoff < UACCE_MAX_REGION) {
+		struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];
 
-	kfree(qfr);
+		mutex_lock(&q->mutex);
+		q->qfrs[vma->vm_pgoff] = NULL;
+		mutex_unlock(&q->mutex);
+		kfree(qfr);
+	}
 }
 
 static const struct vm_operations_struct uacce_vm_ops = {
@@ -212,15 +235,14 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 	if (!qfr)
 		return -ENOMEM;
 
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
 	vma->vm_ops = &uacce_vm_ops;
 	vma->vm_private_data = q;
 	qfr->type = type;
 
-	mutex_lock(&uacce_mutex);
-
-	if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
-		ret = -EINVAL;
+	mutex_lock(&q->mutex);
+	if (!uacce_queue_is_valid(q)) {
+		ret = -ENXIO;
 		goto out_with_lock;
 	}
 
@@ -248,12 +270,12 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 	}
 
 	q->qfrs[type] = qfr;
-	mutex_unlock(&uacce_mutex);
+	mutex_unlock(&q->mutex);
 
 	return ret;
 
 out_with_lock:
-	mutex_unlock(&uacce_mutex);
+	mutex_unlock(&q->mutex);
 	kfree(qfr);
 	return ret;
 }
@@ -262,12 +284,20 @@ static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
 {
 	struct uacce_queue *q = file->private_data;
 	struct uacce_device *uacce = q->uacce;
+	__poll_t ret = 0;
+
+	mutex_lock(&q->mutex);
+	if (!uacce_queue_is_valid(q))
+		goto out_unlock;
 
 	poll_wait(file, &q->wait, wait);
+
 	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
-		return EPOLLIN | EPOLLRDNORM;
+		ret = EPOLLIN | EPOLLRDNORM;
 
-	return 0;
+out_unlock:
+	mutex_unlock(&q->mutex);
+	return ret;
 }
 
 static const struct file_operations uacce_fops = {
@@ -289,7 +319,7 @@ static ssize_t api_show(struct device *dev,
 {
 	struct uacce_device *uacce = to_uacce_device(dev);
 
-	return sprintf(buf, "%s\n", uacce->api_ver);
+	return sysfs_emit(buf, "%s\n", uacce->api_ver);
 }
 
 static ssize_t flags_show(struct device *dev,
@@ -297,7 +327,7 @@ static ssize_t flags_show(struct device *dev,
 {
 	struct uacce_device *uacce = to_uacce_device(dev);
 
-	return sprintf(buf, "%u\n", uacce->flags);
+	return sysfs_emit(buf, "%u\n", uacce->flags);
 }
 
 static ssize_t available_instances_show(struct device *dev,
@@ -309,7 +339,7 @@ static ssize_t available_instances_show(struct device *dev,
 	if (!uacce->ops->get_available_instances)
 		return -ENODEV;
 
-	return sprintf(buf, "%d\n",
+	return sysfs_emit(buf, "%d\n",
 		       uacce->ops->get_available_instances(uacce));
 }
 
@@ -318,7 +348,7 @@ static ssize_t algorithms_show(struct device *dev,
 {
 	struct uacce_device *uacce = to_uacce_device(dev);
 
-	return sprintf(buf, "%s\n", uacce->algs);
+	return sysfs_emit(buf, "%s\n", uacce->algs);
 }
 
 static ssize_t region_mmio_size_show(struct device *dev,
@@ -326,7 +356,7 @@ static ssize_t region_mmio_size_show(struct device *dev,
 {
 	struct uacce_device *uacce = to_uacce_device(dev);
 
-	return sprintf(buf, "%lu\n",
+	return sysfs_emit(buf, "%lu\n",
 		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
 }
 
@@ -335,16 +365,56 @@ static ssize_t region_dus_size_show(struct device *dev,
 {
 	struct uacce_device *uacce = to_uacce_device(dev);
 
-	return sprintf(buf, "%lu\n",
+	return sysfs_emit(buf, "%lu\n",
 		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
 }
 
+static ssize_t isolate_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct uacce_device *uacce = to_uacce_device(dev);
+
+	return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
+}
+
+static ssize_t isolate_strategy_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct uacce_device *uacce = to_uacce_device(dev);
+	u32 val;
+
+	val = uacce->ops->isolate_err_threshold_read(uacce);
+
+	return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t isolate_strategy_store(struct device *dev, struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct uacce_device *uacce = to_uacce_device(dev);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 0, &val) < 0)
+		return -EINVAL;
+
+	if (val > UACCE_MAX_ERR_THRESHOLD)
+		return -EINVAL;
+
+	ret = uacce->ops->isolate_err_threshold_write(uacce, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
 static DEVICE_ATTR_RO(api);
 static DEVICE_ATTR_RO(flags);
 static DEVICE_ATTR_RO(available_instances);
 static DEVICE_ATTR_RO(algorithms);
 static DEVICE_ATTR_RO(region_mmio_size);
 static DEVICE_ATTR_RO(region_dus_size);
+static DEVICE_ATTR_RO(isolate);
+static DEVICE_ATTR_RW(isolate_strategy);
 
 static struct attribute *uacce_dev_attrs[] = {
 	&dev_attr_api.attr,
@@ -353,6 +423,8 @@ static struct attribute *uacce_dev_attrs[] = {
 	&dev_attr_algorithms.attr,
 	&dev_attr_region_mmio_size.attr,
 	&dev_attr_region_dus_size.attr,
+	&dev_attr_isolate.attr,
+	&dev_attr_isolate_strategy.attr,
 	NULL,
 };
 
@@ -368,6 +440,14 @@ static umode_t uacce_dev_is_visible(struct kobject *kobj,
 	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
 		return 0;
 
+	if (attr == &dev_attr_isolate_strategy.attr &&
+	    (!uacce->ops->isolate_err_threshold_read &&
+	     !uacce->ops->isolate_err_threshold_write))
+		return 0;
+
+	if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
+		return 0;
+
 	return attr->mode;
 }
 
@@ -385,40 +465,6 @@ static void uacce_release(struct device *dev)
 	kfree(uacce);
 }
 
-static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
-{
-	int ret;
-
-	if (!(flags & UACCE_DEV_SVA))
-		return flags;
-
-	flags &= ~UACCE_DEV_SVA;
-
-	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
-	if (ret) {
-		dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
-		return flags;
-	}
-
-	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
-	if (ret) {
-		dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
-		iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
-		return flags;
-	}
-
-	return flags | UACCE_DEV_SVA;
-}
-
-static void uacce_disable_sva(struct uacce_device *uacce)
-{
-	if (!(uacce->flags & UACCE_DEV_SVA))
-		return;
-
-	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
-	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
-}
-
 /**
  * uacce_alloc() - alloc an accelerator
  * @parent: pointer of uacce parent device
@@ -438,8 +484,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
 	if (!uacce)
 		return ERR_PTR(-ENOMEM);
 
-	flags = uacce_enable_sva(parent, flags);
-
 	uacce->parent = parent;
 	uacce->flags = flags;
 	uacce->ops = interface->ops;
@@ -450,10 +494,10 @@ struct uacce_device *uacce_alloc(struct device *parent,
 		goto err_with_uacce;
 
 	INIT_LIST_HEAD(&uacce->queues);
-	mutex_init(&uacce->queues_lock);
+	mutex_init(&uacce->mutex);
 	device_initialize(&uacce->dev);
 	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
-	uacce->dev.class = uacce_class;
+	uacce->dev.class = &uacce_class;
 	uacce->dev.groups = uacce_dev_groups;
 	uacce->dev.parent = uacce->parent;
 	uacce->dev.release = uacce_release;
@@ -462,7 +506,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
 	return uacce;
 
 err_with_uacce:
-	uacce_disable_sva(uacce);
 	kfree(uacce);
 	return ERR_PTR(ret);
 }
@@ -500,27 +543,41 @@ void uacce_remove(struct uacce_device *uacce)
 	if (!uacce)
 		return;
 
 	/*
-	 * unmap remaining mapping from user space, preventing user still
-	 * access the mmaped area while parent device is already removed
+	 * uacce_fops_open() may be running concurrently, even after we remove
+	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
+	 * removed uacce device.
 	 */
-	if (uacce->inode)
-		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
-
+	mutex_lock(&uacce->mutex);
 	/* ensure no open queue remains */
-	mutex_lock(&uacce->queues_lock);
 	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+		/*
+		 * Taking q->mutex ensures that fops do not use the defunct
+		 * uacce->ops after the queue is disabled.
+		 */
+		mutex_lock(&q->mutex);
 		uacce_put_queue(q);
+		mutex_unlock(&q->mutex);
 		uacce_unbind_queue(q);
-	}
-	mutex_unlock(&uacce->queues_lock);
 
-	/* disable sva now since no opened queues */
-	uacce_disable_sva(uacce);
+		/*
+		 * unmap remaining mapping from user space, preventing user still
+		 * access the mmaped area while parent device is already removed
+		 */
+		unmap_mapping_range(q->mapping, 0, 0, 1);
+	}
 
 	if (uacce->cdev)
 		cdev_device_del(uacce->cdev, &uacce->dev);
 	xa_erase(&uacce_xa, uacce->dev_id);
+	/*
+	 * uacce exists as long as there are open fds, but ops will be freed
+	 * now. Ensure that bugs cause NULL deref rather than use-after-free.
+	 */
+	uacce->ops = NULL;
+	uacce->parent = NULL;
+	mutex_unlock(&uacce->mutex);
 	put_device(&uacce->dev);
 }
 EXPORT_SYMBOL_GPL(uacce_remove);
@@ -529,13 +586,13 @@ static int __init uacce_init(void)
 {
 	int ret;
 
-	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
-	if (IS_ERR(uacce_class))
-		return PTR_ERR(uacce_class);
+	ret = class_register(&uacce_class);
+	if (ret)
+		return ret;
 
 	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
 	if (ret)
-		class_destroy(uacce_class);
+		class_unregister(&uacce_class);
 
 	return ret;
 }
@@ -543,7 +600,7 @@ static int __init uacce_init(void)
static __exit void uacce_exit(void)
 {
 	unregister_chrdev_region(uacce_devt, MINORMASK);
-	class_destroy(uacce_class);
+	class_unregister(&uacce_class);
 }
 
 subsys_initcall(uacce_init);
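
Editor's note: the new "isolate" and "isolate_strategy" attributes are only exposed when the parent driver supplies the corresponding ops (see the uacce_dev_is_visible() hunk above). A minimal driver-side sketch of how those ops might be backed, assuming the op signatures inferred from the call sites in this diff and the uacce_dev_state enum from include/linux/uacce.h; the "foo" driver and its threshold policy are hypothetical:

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/uacce.h>

/* Hypothetical per-device state kept in the parent device's drvdata. */
struct foo_dev {
	atomic_t err_cnt;	/* error events counted by the driver */
	u32 err_threshold;	/* isolate once err_cnt reaches this, 0 = never */
};

static struct foo_dev *to_foo_dev(struct uacce_device *uacce)
{
	return dev_get_drvdata(uacce->parent);
}

/* Backs the read-only "isolate" attribute. */
static enum uacce_dev_state foo_get_isolate_state(struct uacce_device *uacce)
{
	struct foo_dev *foo = to_foo_dev(uacce);

	if (foo->err_threshold &&
	    atomic_read(&foo->err_cnt) >= foo->err_threshold)
		return UACCE_DEV_ISOLATE;

	return UACCE_DEV_NORMAL;
}

/* Backs reads of "isolate_strategy". */
static u32 foo_isolate_err_threshold_read(struct uacce_device *uacce)
{
	return to_foo_dev(uacce)->err_threshold;
}

/* Backs writes; the core already rejects values above UACCE_MAX_ERR_THRESHOLD. */
static int foo_isolate_err_threshold_write(struct uacce_device *uacce, u32 num)
{
	to_foo_dev(uacce)->err_threshold = num;
	return 0;
}

static const struct uacce_ops foo_uacce_ops = {
	/* ... get_queue/start_queue/mmap/ioctl ops ... */
	.get_isolate_state		= foo_get_isolate_state,
	.isolate_err_threshold_read	= foo_isolate_err_threshold_read,
	.isolate_err_threshold_write	= foo_isolate_err_threshold_write,
};

A driver that leaves these ops NULL simply does not expose the two files, so existing uacce drivers keep their current sysfs layout.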
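For reference, here is the userspace queue lifecycle whose races the reworked locking arbitrates, as a sketch. UACCE_CMD_START_Q, UACCE_CMD_PUT_Q and UACCE_QFRT_MMIO come from the UAPI header (include/uapi/misc/uacce/uacce.h); the device node name, the installed header path and the one-page region size are illustrative assumptions:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <misc/uacce/uacce.h>	/* installed UAPI header path (assumed) */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *mmio = MAP_FAILED;
	int fd;

	/* uacce_fops_open(): binds the mm for SVA, queue enters UACCE_Q_INIT */
	fd = open("/dev/foo_acc-0", O_RDWR);	/* hypothetical node */
	if (fd < 0)
		return 1;

	/* uacce_fops_mmap(): vm_pgoff selects the region type */
	mmio = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)UACCE_QFRT_MMIO * page);
	if (mmio == MAP_FAILED)
		goto out;

	/* uacce_fops_unl_ioctl(): UACCE_Q_INIT -> UACCE_Q_STARTED */
	if (ioctl(fd, UACCE_CMD_START_Q))
		goto out;

	/* ... ring doorbells through mmio, wait for work with poll(fd) ... */

	/* UACCE_Q_STARTED -> UACCE_Q_ZOMBIE; later fops return -ENXIO */
	ioctl(fd, UACCE_CMD_PUT_Q);
out:
	if (mmio != MAP_FAILED)
		munmap(mmio, page);
	close(fd);	/* uacce_fops_release(): put, unbind, list_del, free */
	return 0;
}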
