Diffstat (limited to 'drivers/uio/uio.c')
 -rw-r--r--   drivers/uio/uio.c | 542
 1 file changed, 398 insertions, 144 deletions
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 3b96f18593b3..d93ed4e86a17 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * drivers/uio/uio.c
  *
@@ -9,8 +10,6 @@
  * Userspace IO
  *
  * Base Functions
- *
- * Licensed under the GPLv2 only.
  */
 
 #include <linux/module.h>
@@ -20,27 +19,15 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/idr.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/string.h>
 #include <linux/kobject.h>
 #include <linux/cdev.h>
 #include <linux/uio_driver.h>
+#include <linux/dma-mapping.h>
 
 #define UIO_MAX_DEVICES		(1U << MINORBITS)
 
-struct uio_device {
-	struct module		*owner;
-	struct device		*dev;
-	int			minor;
-	atomic_t		event;
-	struct fasync_struct	*async_queue;
-	wait_queue_head_t	wait;
-	int			vma_count;
-	struct uio_info		*info;
-	struct kobject		*map_dir;
-	struct kobject		*portio_dir;
-};
-
 static int uio_major;
 static struct cdev *uio_cdev;
 static DEFINE_IDR(uio_idr);
@@ -69,17 +56,17 @@ static ssize_t map_name_show(struct uio_mem *mem, char *buf)
 
 static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
 {
-	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr);
+	return sprintf(buf, "%pa\n", &mem->addr);
 }
 
 static ssize_t map_size_show(struct uio_mem *mem, char *buf)
 {
-	return sprintf(buf, "0x%lx\n", mem->size);
+	return sprintf(buf, "%pa\n", &mem->size);
 }
 
 static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
 {
-	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
+	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs);
 }
 
 struct map_sysfs_entry {
@@ -97,13 +84,14 @@ static struct map_sysfs_entry size_attribute =
 static struct map_sysfs_entry offset_attribute =
 	__ATTR(offset, S_IRUGO, map_offset_show, NULL);
 
-static struct attribute *attrs[] = {
+static struct attribute *map_attrs[] = {
 	&name_attribute.attr,
 	&addr_attribute.attr,
 	&size_attribute.attr,
 	&offset_attribute.attr,
 	NULL,	/* need to NULL terminate the list of attributes */
 };
+ATTRIBUTE_GROUPS(map);
 
 static void map_release(struct kobject *kobj)
 {
@@ -130,10 +118,10 @@ static const struct sysfs_ops map_sysfs_ops = {
 	.show = map_type_show,
 };
 
-static struct kobj_type map_attr_type = {
+static const struct kobj_type map_attr_type = {
 	.release	= map_release,
 	.sysfs_ops	= &map_sysfs_ops,
-	.default_attrs	= attrs,
+	.default_groups	= map_groups,
 };
 
 struct uio_portio {
@@ -192,6 +180,7 @@ static struct attribute *portio_attrs[] = {
 	&portio_porttype_attribute.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(portio);
 
 static void portio_release(struct kobject *kobj)
 {
@@ -218,46 +207,78 @@ static const struct sysfs_ops portio_sysfs_ops = {
 	.show = portio_type_show,
 };
 
-static struct kobj_type portio_attr_type = {
+static const struct kobj_type portio_attr_type = {
 	.release	= portio_release,
 	.sysfs_ops	= &portio_sysfs_ops,
-	.default_attrs = portio_attrs,
+	.default_groups = portio_groups,
 };
 
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
 			 struct device_attribute *attr, char *buf)
 {
 	struct uio_device *idev = dev_get_drvdata(dev);
-	return sprintf(buf, "%s\n", idev->info->name);
+	int ret;
+
+	mutex_lock(&idev->info_lock);
+	if (!idev->info) {
+		ret = -EINVAL;
+		dev_err(dev, "the device has been unregistered\n");
+		goto out;
+	}
+
+	ret = sprintf(buf, "%s\n", idev->info->name);
+
+out:
+	mutex_unlock(&idev->info_lock);
+	return ret;
 }
+static DEVICE_ATTR_RO(name);
 
-static ssize_t show_version(struct device *dev,
+static ssize_t version_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct uio_device *idev = dev_get_drvdata(dev);
-	return sprintf(buf, "%s\n", idev->info->version);
+	int ret;
+
+	mutex_lock(&idev->info_lock);
+	if (!idev->info) {
+		ret = -EINVAL;
+		dev_err(dev, "the device has been unregistered\n");
+		goto out;
+	}
+
+	ret = sprintf(buf, "%s\n", idev->info->version);
+
+out:
+	mutex_unlock(&idev->info_lock);
+	return ret;
 }
+static DEVICE_ATTR_RO(version);
 
-static ssize_t show_event(struct device *dev,
+static ssize_t event_show(struct device *dev,
 			  struct device_attribute *attr, char *buf)
 {
 	struct uio_device *idev = dev_get_drvdata(dev);
 	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
 }
+static DEVICE_ATTR_RO(event);
 
-static struct device_attribute uio_class_attributes[] = {
-	__ATTR(name, S_IRUGO, show_name, NULL),
-	__ATTR(version, S_IRUGO, show_version, NULL),
-	__ATTR(event, S_IRUGO, show_event, NULL),
-	{}
+static struct attribute *uio_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_version.attr,
+	&dev_attr_event.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(uio);
 
 /* UIO class infrastructure */
 static struct class uio_class = {
 	.name = "uio",
-	.dev_attrs = uio_class_attributes,
+	.dev_groups = uio_groups,
 };
 
+static bool uio_class_registered;
+
 /*
  * device functions
  */
@@ -279,22 +300,26 @@ static int uio_dev_add_attributes(struct uio_device *idev)
 		if (!map_found) {
 			map_found = 1;
 			idev->map_dir = kobject_create_and_add("maps",
-							&idev->dev->kobj);
-			if (!idev->map_dir)
+							&idev->dev.kobj);
+			if (!idev->map_dir) {
+				ret = -ENOMEM;
 				goto err_map;
+			}
 		}
 		map = kzalloc(sizeof(*map), GFP_KERNEL);
-		if (!map)
+		if (!map) {
+			ret = -ENOMEM;
 			goto err_map;
+		}
 		kobject_init(&map->kobj, &map_attr_type);
 		map->mem = mem;
 		mem->map = map;
 		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
 		if (ret)
-			goto err_map;
+			goto err_map_kobj;
 		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
 		if (ret)
-			goto err_map;
+			goto err_map_kobj;
 	}
 
 	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
@@ -304,42 +329,50 @@ static int uio_dev_add_attributes(struct uio_device *idev)
 		if (!portio_found) {
 			portio_found = 1;
 			idev->portio_dir = kobject_create_and_add("portio",
-							&idev->dev->kobj);
-			if (!idev->portio_dir)
+							&idev->dev.kobj);
+			if (!idev->portio_dir) {
+				ret = -ENOMEM;
 				goto err_portio;
+			}
 		}
 		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
-		if (!portio)
+		if (!portio) {
+			ret = -ENOMEM;
 			goto err_portio;
+		}
 		kobject_init(&portio->kobj, &portio_attr_type);
 		portio->port = port;
 		port->portio = portio;
 		ret = kobject_add(&portio->kobj, idev->portio_dir,
 							"port%d", pi);
 		if (ret)
-			goto err_portio;
+			goto err_portio_kobj;
 		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
 		if (ret)
-			goto err_portio;
+			goto err_portio_kobj;
 	}
 
 	return 0;
 
 err_portio:
-	for (pi--; pi >= 0; pi--) {
+	pi--;
+err_portio_kobj:
+	for (; pi >= 0; pi--) {
 		port = &idev->info->port[pi];
 		portio = port->portio;
 		kobject_put(&portio->kobj);
 	}
 	kobject_put(idev->portio_dir);
 err_map:
-	for (mi--; mi>=0; mi--) {
+	mi--;
+err_map_kobj:
+	for (; mi >= 0; mi--) {
 		mem = &idev->info->mem[mi];
 		map = mem->map;
 		kobject_put(&map->kobj);
 	}
 	kobject_put(idev->map_dir);
-	dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
+	dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret);
 	return ret;
 }
 
@@ -368,7 +401,7 @@ static void uio_dev_del_attributes(struct uio_device *idev)
 
 static int uio_get_minor(struct uio_device *idev)
 {
-	int retval = -ENOMEM;
+	int retval;
 
 	mutex_lock(&minor_lock);
 	retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
@@ -376,17 +409,17 @@ static int uio_get_minor(struct uio_device *idev)
 		idev->minor = retval;
 		retval = 0;
 	} else if (retval == -ENOSPC) {
-		dev_err(idev->dev, "too many uio devices\n");
+		dev_err(&idev->dev, "too many uio devices\n");
 		retval = -EINVAL;
 	}
 	mutex_unlock(&minor_lock);
 	return retval;
 }
 
-static void uio_free_minor(struct uio_device *idev)
+static void uio_free_minor(unsigned long minor)
 {
 	mutex_lock(&minor_lock);
-	idr_remove(&uio_idr, idev->minor);
+	idr_remove(&uio_idr, minor);
 	mutex_unlock(&minor_lock);
 }
 
@@ -405,21 +438,36 @@ void uio_event_notify(struct uio_info *info)
 EXPORT_SYMBOL_GPL(uio_event_notify);
 
 /**
- * uio_interrupt - hardware interrupt handler
+ * uio_interrupt_handler - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the devices uio_device structure
 */
-static irqreturn_t uio_interrupt(int irq, void *dev_id)
+static irqreturn_t uio_interrupt_handler(int irq, void *dev_id)
 {
 	struct uio_device *idev = (struct uio_device *)dev_id;
-	irqreturn_t ret = idev->info->handler(irq, idev->info);
+	irqreturn_t ret;
 
+	ret = idev->info->handler(irq, idev->info);
 	if (ret == IRQ_HANDLED)
-		uio_event_notify(idev->info);
+		ret = IRQ_WAKE_THREAD;
 
 	return ret;
 }
 
+/**
+ * uio_interrupt_thread - irq thread handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the devices uio_device structure
+ */
+static irqreturn_t uio_interrupt_thread(int irq, void *dev_id)
+{
+	struct uio_device *idev = (struct uio_device *)dev_id;
+
+	uio_event_notify(idev->info);
+
+	return IRQ_HANDLED;
+}
+
 struct uio_listener {
 	struct uio_device *dev;
 	s32 event_count;
@@ -433,15 +481,17 @@ static int uio_open(struct inode *inode, struct file *filep)
 
 	mutex_lock(&minor_lock);
 	idev = idr_find(&uio_idr, iminor(inode));
-	mutex_unlock(&minor_lock);
 	if (!idev) {
 		ret = -ENODEV;
+		mutex_unlock(&minor_lock);
 		goto out;
 	}
+	get_device(&idev->dev);
+	mutex_unlock(&minor_lock);
 
 	if (!try_module_get(idev->owner)) {
 		ret = -ENODEV;
-		goto out;
+		goto err_module_get;
 	}
 
 	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
@@ -454,11 +504,19 @@ static int uio_open(struct inode *inode, struct file *filep)
 	listener->event_count = atomic_read(&idev->event);
 	filep->private_data = listener;
 
-	if (idev->info->open) {
-		ret = idev->info->open(idev->info, inode);
-		if (ret)
-			goto err_infoopen;
+	mutex_lock(&idev->info_lock);
+	if (!idev->info) {
+		mutex_unlock(&idev->info_lock);
+		ret = -EINVAL;
+		goto err_infoopen;
 	}
+
+	if (idev->info->open)
+		ret = idev->info->open(idev->info, inode);
+	mutex_unlock(&idev->info_lock);
+	if (ret)
+		goto err_infoopen;
+
 	return 0;
 
 err_infoopen:
@@ -467,6 +525,9 @@ err_infoopen:
 err_alloc_listener:
 	module_put(idev->owner);
 
+err_module_get:
+	put_device(&idev->dev);
+
 out:
 	return ret;
 }
@@ -485,25 +546,34 @@ static int uio_release(struct inode *inode, struct file *filep)
 	struct uio_listener *listener = filep->private_data;
 	struct uio_device *idev = listener->dev;
 
-	if (idev->info->release)
+	mutex_lock(&idev->info_lock);
+	if (idev->info && idev->info->release)
 		ret = idev->info->release(idev->info, inode);
+	mutex_unlock(&idev->info_lock);
 
 	module_put(idev->owner);
 	kfree(listener);
+	put_device(&idev->dev);
 	return ret;
 }
 
-static unsigned int uio_poll(struct file *filep, poll_table *wait)
+static __poll_t uio_poll(struct file *filep, poll_table *wait)
 {
 	struct uio_listener *listener = filep->private_data;
 	struct uio_device *idev = listener->dev;
+	__poll_t ret = 0;
 
-	if (!idev->info->irq)
-		return -EIO;
+	mutex_lock(&idev->info_lock);
+	if (!idev->info || !idev->info->irq)
+		ret = EPOLLERR;
+	mutex_unlock(&idev->info_lock);
+
+	if (ret)
+		return ret;
 
 	poll_wait(filep, &idev->wait, wait);
 	if (listener->event_count != atomic_read(&idev->event))
-		return POLLIN | POLLRDNORM;
+		return EPOLLIN | EPOLLRDNORM;
 	return 0;
 }
 
@@ -513,22 +583,28 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
 	struct uio_listener *listener = filep->private_data;
 	struct uio_device *idev = listener->dev;
 	DECLARE_WAITQUEUE(wait, current);
-	ssize_t retval;
+	ssize_t retval = 0;
 	s32 event_count;
 
-	if (!idev->info->irq)
-		return -EIO;
-
 	if (count != sizeof(s32))
 		return -EINVAL;
 
 	add_wait_queue(&idev->wait, &wait);
 
 	do {
+		mutex_lock(&idev->info_lock);
+		if (!idev->info || !idev->info->irq) {
+			retval = -EIO;
+			mutex_unlock(&idev->info_lock);
+			break;
+		}
+		mutex_unlock(&idev->info_lock);
+
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		event_count = atomic_read(&idev->event);
 		if (event_count != listener->event_count) {
+			__set_current_state(TASK_RUNNING);
 			if (copy_to_user(buf, &event_count, count))
 				retval = -EFAULT;
 			else {
@@ -564,20 +640,32 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
 	ssize_t retval;
 	s32 irq_on;
 
-	if (!idev->info->irq)
-		return -EIO;
-
 	if (count != sizeof(s32))
 		return -EINVAL;
 
-	if (!idev->info->irqcontrol)
-		return -ENOSYS;
-
 	if (copy_from_user(&irq_on, buf, count))
 		return -EFAULT;
 
+	mutex_lock(&idev->info_lock);
+	if (!idev->info) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (!idev->info->irq) {
+		retval = -EIO;
+		goto out;
+	}
+
+	if (!idev->info->irqcontrol) {
+		retval = -ENOSYS;
+		goto out;
+	}
+
 	retval = idev->info->irqcontrol(idev->info, irq_on);
 
+out:
+	mutex_unlock(&idev->info_lock);
 	return retval ? retval : sizeof(s32);
 }
 
@@ -593,27 +681,26 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
 	return -1;
 }
 
-static void uio_vma_open(struct vm_area_struct *vma)
+static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
 {
-	struct uio_device *idev = vma->vm_private_data;
-	idev->vma_count++;
-}
-
-static void uio_vma_close(struct vm_area_struct *vma)
-{
-	struct uio_device *idev = vma->vm_private_data;
-	idev->vma_count--;
-}
-
-static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct uio_device *idev = vma->vm_private_data;
+	struct uio_device *idev = vmf->vma->vm_private_data;
 	struct page *page;
 	unsigned long offset;
+	void *addr;
+	vm_fault_t ret = 0;
+	int mi;
 
-	int mi = uio_find_mem_index(vma);
-	if (mi < 0)
-		return VM_FAULT_SIGBUS;
+	mutex_lock(&idev->info_lock);
+	if (!idev->info) {
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
+
+	mi = uio_find_mem_index(vmf->vma);
+	if (mi < 0) {
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
 	/*
 	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 */
@@ -621,43 +708,113 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 */
 	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
 
+	addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
 	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
-		page = virt_to_page(idev->info->mem[mi].addr + offset);
+		page = virt_to_page(addr);
 	else
-		page = vmalloc_to_page((void *)(unsigned long)idev->info->mem[mi].addr + offset);
+		page = vmalloc_to_page(addr);
 	get_page(page);
 	vmf->page = page;
-	return 0;
+
+out:
+	mutex_unlock(&idev->info_lock);
+
+	return ret;
 }
 
-static const struct vm_operations_struct uio_vm_ops = {
-	.open = uio_vma_open,
-	.close = uio_vma_close,
+static const struct vm_operations_struct uio_logical_vm_ops = {
 	.fault = uio_vma_fault,
 };
 
+static int uio_mmap_logical(struct vm_area_struct *vma)
+{
+	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+	vma->vm_ops = &uio_logical_vm_ops;
+	return 0;
+}
+
+static const struct vm_operations_struct uio_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+	.access = generic_access_phys,
+#endif
+};
+
 static int uio_mmap_physical(struct vm_area_struct *vma)
 {
 	struct uio_device *idev = vma->vm_private_data;
 	int mi = uio_find_mem_index(vma);
+	struct uio_mem *mem;
+
 	if (mi < 0)
 		return -EINVAL;
+	mem = idev->info->mem + mi;
 
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (mem->addr & ~PAGE_MASK)
+		return -ENODEV;
+	if (vma->vm_end - vma->vm_start > mem->size)
+		return -EINVAL;
 
+	vma->vm_ops = &uio_physical_vm_ops;
+	if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
+	/*
+	 * We cannot use the vm_iomap_memory() helper here,
+	 * because vma->vm_pgoff is the map index we looked
+	 * up above in uio_find_mem_index(), rather than an
+	 * actual page offset into the mmap.
+	 *
+	 * So we just do the physical mmap without a page
+	 * offset.
+	 */
 	return remap_pfn_range(vma,
 			       vma->vm_start,
-			       idev->info->mem[mi].addr >> PAGE_SHIFT,
+			       mem->addr >> PAGE_SHIFT,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);
 }
 
-static int uio_mmap_logical(struct vm_area_struct *vma)
+static int uio_mmap_dma_coherent(struct vm_area_struct *vma)
 {
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = &uio_vm_ops;
-	uio_vma_open(vma);
-	return 0;
+	struct uio_device *idev = vma->vm_private_data;
+	struct uio_mem *mem;
+	void *addr;
+	int ret = 0;
+	int mi;
+
+	mi = uio_find_mem_index(vma);
+	if (mi < 0)
+		return -EINVAL;
+
+	mem = idev->info->mem + mi;
+
+	if (mem->addr & ~PAGE_MASK)
+		return -ENODEV;
+	if (mem->dma_addr & ~PAGE_MASK)
+		return -ENODEV;
+	if (!mem->dma_device)
+		return -ENODEV;
+	if (vma->vm_end - vma->vm_start > mem->size)
+		return -EINVAL;
+
+	dev_warn(mem->dma_device,
+		 "use of UIO_MEM_DMA_COHERENT is highly discouraged");
+
+	/*
+	 * UIO uses offset to index into the maps for a device.
+	 * We need to clear vm_pgoff for dma_mmap_coherent.
+	 */
+	vma->vm_pgoff = 0;
+
+	addr = (void *)(uintptr_t)mem->addr;
+	ret = dma_mmap_coherent(mem->dma_device,
+				vma,
+				addr,
+				mem->dma_addr,
+				vma->vm_end - vma->vm_start);
+	vma->vm_pgoff = mi;
+
+	return ret;
 }
 
 static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
@@ -673,30 +830,50 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
 
 	vma->vm_private_data = idev;
 
+	mutex_lock(&idev->info_lock);
+	if (!idev->info) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	mi = uio_find_mem_index(vma);
-	if (mi < 0)
-		return -EINVAL;
+	if (mi < 0) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	requested_pages = vma_pages(vma);
 	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
 			+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
-	if (requested_pages > actual_pages)
-		return -EINVAL;
+	if (requested_pages > actual_pages) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	if (idev->info->mmap) {
 		ret = idev->info->mmap(idev->info, vma);
-		return ret;
+		goto out;
 	}
 
 	switch (idev->info->mem[mi].memtype) {
-	case UIO_MEM_PHYS:
-		return uio_mmap_physical(vma);
-	case UIO_MEM_LOGICAL:
-	case UIO_MEM_VIRTUAL:
-		return uio_mmap_logical(vma);
-	default:
-		return -EINVAL;
+	case UIO_MEM_IOVA:
+	case UIO_MEM_PHYS:
+		ret = uio_mmap_physical(vma);
+		break;
+	case UIO_MEM_LOGICAL:
+	case UIO_MEM_VIRTUAL:
+		ret = uio_mmap_logical(vma);
+		break;
+	case UIO_MEM_DMA_COHERENT:
+		ret = uio_mmap_dma_coherent(vma);
+		break;
+	default:
+		ret = -EINVAL;
 	}
 
+ out:
+	mutex_unlock(&idev->info_lock);
+	return ret;
 }
 
 static const struct file_operations uio_fops = {
@@ -766,6 +943,9 @@ static int init_uio_class(void)
 		printk(KERN_ERR "class_register failed for uio\n");
 		goto err_class_register;
 	}
+
+	uio_class_registered = true;
+
 	return 0;
 
 err_class_register:
@@ -776,12 +956,20 @@ exit:
 
 static void release_uio_class(void)
 {
+	uio_class_registered = false;
 	class_unregister(&uio_class);
 	uio_major_cleanup();
 }
 
+static void uio_device_release(struct device *dev)
+{
+	struct uio_device *idev = dev_get_drvdata(dev);
+
+	kfree(idev);
+}
+
 /**
- * uio_register_device - register a new userspace IO device
+ * __uio_register_device - register a new userspace IO device
 * @owner:	module that creates the new device
 * @parent:	parent device
 * @info:	UIO device capabilities
@@ -795,6 +983,9 @@ int __uio_register_device(struct module *owner,
 	struct uio_device *idev;
 	int ret = 0;
 
+	if (!uio_class_registered)
+		return -EPROBE_DEFER;
+
 	if (!parent || !info || !info->name || !info->version)
 		return -EINVAL;
 
@@ -802,27 +993,35 @@ int __uio_register_device(struct module *owner,
 
 	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
 	if (!idev) {
-		ret = -ENOMEM;
-		goto err_kzalloc;
+		return -ENOMEM;
 	}
 
 	idev->owner = owner;
 	idev->info = info;
+	mutex_init(&idev->info_lock);
 	init_waitqueue_head(&idev->wait);
 	atomic_set(&idev->event, 0);
 
 	ret = uio_get_minor(idev);
+	if (ret) {
+		kfree(idev);
+		return ret;
+	}
+
+	device_initialize(&idev->dev);
+	idev->dev.devt = MKDEV(uio_major, idev->minor);
+	idev->dev.class = &uio_class;
+	idev->dev.parent = parent;
+	idev->dev.release = uio_device_release;
+	dev_set_drvdata(&idev->dev, idev);
+
+	ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
+	if (ret)
+		goto err_device_create;
+
+	ret = device_add(&idev->dev);
 	if (ret)
-		goto err_get_minor;
-
-	idev->dev = device_create(&uio_class, parent,
-				  MKDEV(uio_major, idev->minor), idev,
-				  "uio%d", idev->minor);
-	if (IS_ERR(idev->dev)) {
-		printk(KERN_ERR "UIO: device register failed\n");
-		ret = PTR_ERR(idev->dev);
 		goto err_device_create;
-	}
 
 	ret = uio_dev_add_attributes(idev);
 	if (ret)
@@ -831,10 +1030,20 @@ int __uio_register_device(struct module *owner,
 	info->uio_dev = idev;
 
 	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
-		ret = request_irq(info->irq, uio_interrupt,
-				  info->irq_flags, info->name, idev);
-		if (ret)
+		/*
+		 * Note that we deliberately don't use devm_request_irq
+		 * here. The parent module can unregister the UIO device
+		 * and call pci_disable_msi, which requires that this
+		 * irq has been freed. However, the device may have open
		 * FDs at the time of unregister and therefore may not be
		 * freed until they are released.
		 */
+		ret = request_threaded_irq(info->irq, uio_interrupt_handler, uio_interrupt_thread,
+					   info->irq_flags, info->name, idev);
+		if (ret) {
+			info->uio_dev = NULL;
 			goto err_request_irq;
+		}
 	}
 
 	return 0;
@@ -842,16 +1051,52 @@
 err_request_irq:
 	uio_dev_del_attributes(idev);
 err_uio_dev_add_attributes:
-	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
+	device_del(&idev->dev);
 err_device_create:
-	uio_free_minor(idev);
-err_get_minor:
-	kfree(idev);
-err_kzalloc:
+	uio_free_minor(idev->minor);
+	put_device(&idev->dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__uio_register_device);
 
+static void devm_uio_unregister_device(struct device *dev, void *res)
+{
+	uio_unregister_device(*(struct uio_info **)res);
+}
+
+/**
+ * __devm_uio_register_device - Resource managed uio_register_device()
+ * @owner: module that creates the new device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
+int __devm_uio_register_device(struct module *owner,
+			       struct device *parent,
+			       struct uio_info *info)
+{
+	struct uio_info **ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	*ptr = info;
+	ret = __uio_register_device(owner, parent, info);
+	if (ret) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	devres_add(parent, ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__devm_uio_register_device);
+
 /**
 * uio_unregister_device - unregister a industrial IO device
 * @info:	UIO device capabilities
@@ -860,21 +1105,28 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
 void uio_unregister_device(struct uio_info *info)
 {
 	struct uio_device *idev;
+	unsigned long minor;
 
 	if (!info || !info->uio_dev)
 		return;
 
 	idev = info->uio_dev;
+	minor = idev->minor;
 
-	uio_free_minor(idev);
+	mutex_lock(&idev->info_lock);
+	uio_dev_del_attributes(idev);
 
-	if (info->irq && (info->irq != UIO_IRQ_CUSTOM))
+	if (info->irq && info->irq != UIO_IRQ_CUSTOM)
 		free_irq(info->irq, idev);
 
-	uio_dev_del_attributes(idev);
+	idev->info = NULL;
+	mutex_unlock(&idev->info_lock);
 
-	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
-	kfree(idev);
+	wake_up_interruptible(&idev->wait);
+	kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
+
+	uio_free_minor(minor);
+	device_unregister(&idev->dev);
 
 	return;
 }
@@ -888,8 +1140,10 @@ static int __init uio_init(void)
 static void __exit uio_exit(void)
 {
 	release_uio_class();
+	idr_destroy(&uio_idr);
 }
 
 module_init(uio_init)
 module_exit(uio_exit)
+MODULE_DESCRIPTION("Userspace IO core module");
 MODULE_LICENSE("GPL v2");
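
Example (not part of the commit): a minimal sketch of a driver that registers a UIO device against the interface changed above, using the resource-managed devm_uio_register_device() wrapper around __devm_uio_register_device() and the plain handler path that the new request_threaded_irq() split in __uio_register_device() services. The platform device name "my-uio", the single register bank, and every my_uio_* identifier are assumptions made for illustration only.

/*
 * Hypothetical example driver; "my-uio", the register layout, and all
 * my_uio_* names are assumptions, not part of the diff above.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static irqreturn_t my_uio_handler(int irq, struct uio_info *info)
{
	/*
	 * Acknowledge or mask the device interrupt here; returning
	 * IRQ_HANDLED lets the UIO core wake readers of /dev/uioX from
	 * its irq thread (uio_interrupt_thread above).
	 */
	return IRQ_HANDLED;
}

static int my_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;
	struct resource *res;
	int irq;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	info->name = "my-uio";
	info->version = "0.1";
	info->mem[0].name = "regs";
	info->mem[0].addr = res->start;          /* page-aligned physical address */
	info->mem[0].size = resource_size(res);
	info->mem[0].memtype = UIO_MEM_PHYS;     /* served by uio_mmap_physical() */
	info->irq = irq;
	info->handler = my_uio_handler;

	/* Resource-managed: unregisters automatically when the driver detaches. */
	return devm_uio_register_device(&pdev->dev, info);
}

static struct platform_driver my_uio_driver = {
	.probe = my_uio_probe,
	.driver = {
		.name = "my-uio",
	},
};
module_platform_driver(my_uio_driver);

MODULE_LICENSE("GPL v2");

From userspace, the contract visible in uio_read(), uio_write() and uio_mmap() above is: read() on /dev/uioX returns the 4-byte (s32) event counter, write() of an s32 is forwarded to the driver's irqcontrol() hook, and mmap() with offset N*PAGE_SIZE selects memory map N.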
