Diffstat (limited to 'drivers/infiniband/core/uverbs_main.c')
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 1427
1 file changed, 928 insertions(+), 499 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2c6f0f2ecd9d..973fe2c7ef53 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -45,10 +45,18 @@ #include <linux/cdev.h> #include <linux/anon_inodes.h> #include <linux/slab.h> +#include <linux/sched/mm.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> + +#include <rdma/ib.h> +#include <rdma/uverbs_std_types.h> +#include <rdma/rdma_netlink.h> +#include <rdma/ib_ucaps.h> #include "uverbs.h" +#include "core_priv.h" +#include "rdma_core.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace verbs access"); @@ -57,124 +65,120 @@ MODULE_LICENSE("Dual BSD/GPL"); enum { IB_UVERBS_MAJOR = 231, IB_UVERBS_BASE_MINOR = 192, - IB_UVERBS_MAX_DEVICES = 32 + IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS, + IB_UVERBS_NUM_FIXED_MINOR = 32, + IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR, }; #define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR) -static struct class *uverbs_class; - -DEFINE_SPINLOCK(ib_uverbs_idr_lock); -DEFINE_IDR(ib_uverbs_pd_idr); -DEFINE_IDR(ib_uverbs_mr_idr); -DEFINE_IDR(ib_uverbs_mw_idr); -DEFINE_IDR(ib_uverbs_ah_idr); -DEFINE_IDR(ib_uverbs_cq_idr); -DEFINE_IDR(ib_uverbs_qp_idr); -DEFINE_IDR(ib_uverbs_srq_idr); -DEFINE_IDR(ib_uverbs_xrcd_idr); - -static DEFINE_SPINLOCK(map_lock); -static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); - -static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) = { - [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, - [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, - [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, - [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, - [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, - [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, - [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, - [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw, - [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw, - [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, - [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, - [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, - [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, - [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, - [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, - [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, - [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, - [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, - [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, - [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, - [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, - [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, - [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, - [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, - [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, - [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, - [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, - [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, - [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, - [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, - [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd, - [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, - [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, - [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp +static dev_t 
dynamic_uverbs_dev; + +static DEFINE_IDA(uverbs_ida); +static int ib_uverbs_add_one(struct ib_device *device); +static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); +static struct ib_client uverbs_client; + +static char *uverbs_devnode(const struct device *dev, umode_t *mode) +{ + if (mode) + *mode = 0666; + return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); +} + +static const struct class uverbs_class = { + .name = "infiniband_verbs", + .devnode = uverbs_devnode, }; -static void ib_uverbs_add_one(struct ib_device *device); -static void ib_uverbs_remove_one(struct ib_device *device); +/* + * Must be called with the ufile->device->disassociate_srcu held, and the lock + * must be held until use of the ucontext is finished. + */ +struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile) +{ + /* + * We do not hold the hw_destroy_rwsem lock for this flow, instead + * srcu is used. It does not matter if someone races this with + * get_context, we get NULL or valid ucontext. + */ + struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext); + + if (!srcu_dereference(ufile->device->ib_dev, + &ufile->device->disassociate_srcu)) + return ERR_PTR(-EIO); + + if (!ucontext) + return ERR_PTR(-EINVAL); -static void ib_uverbs_release_dev(struct kref *ref) + return ucontext; +} +EXPORT_SYMBOL(ib_uverbs_get_ucontext_file); + +int uverbs_dealloc_mw(struct ib_mw *mw) { - struct ib_uverbs_device *dev = - container_of(ref, struct ib_uverbs_device, ref); + struct ib_pd *pd = mw->pd; + int ret; - complete(&dev->comp); + ret = mw->device->ops.dealloc_mw(mw); + if (ret) + return ret; + + atomic_dec(&pd->usecnt); + kfree(mw); + return ret; } -static void ib_uverbs_release_event_file(struct kref *ref) +static void ib_uverbs_release_dev(struct device *device) { - struct ib_uverbs_event_file *file = - container_of(ref, struct ib_uverbs_event_file, ref); + struct ib_uverbs_device *dev = + container_of(device, struct ib_uverbs_device, dev); - kfree(file); + uverbs_destroy_api(dev->uapi); + cleanup_srcu_struct(&dev->disassociate_srcu); + mutex_destroy(&dev->lists_mutex); + mutex_destroy(&dev->xrcd_tree_mutex); + kfree(dev); } -void ib_uverbs_release_ucq(struct ib_uverbs_file *file, - struct ib_uverbs_event_file *ev_file, - struct ib_ucq_object *uobj) +void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file, + struct ib_ucq_object *uobj) { struct ib_uverbs_event *evt, *tmp; if (ev_file) { - spin_lock_irq(&ev_file->lock); + spin_lock_irq(&ev_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { list_del(&evt->list); kfree(evt); } - spin_unlock_irq(&ev_file->lock); + spin_unlock_irq(&ev_file->ev_queue.lock); - kref_put(&ev_file->ref, ib_uverbs_release_event_file); + uverbs_uobject_put(&ev_file->uobj); } - spin_lock_irq(&file->async_file->lock); - list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) { - list_del(&evt->list); - kfree(evt); - } - spin_unlock_irq(&file->async_file->lock); + ib_uverbs_release_uevent(&uobj->uevent); } -void ib_uverbs_release_uevent(struct ib_uverbs_file *file, - struct ib_uevent_object *uobj) +void ib_uverbs_release_uevent(struct ib_uevent_object *uobj) { + struct ib_uverbs_async_event_file *async_file = uobj->event_file; struct ib_uverbs_event *evt, *tmp; - spin_lock_irq(&file->async_file->lock); + if (!async_file) + return; + + spin_lock_irq(&async_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { list_del(&evt->list); kfree(evt); } 
- spin_unlock_irq(&file->async_file->lock); + spin_unlock_irq(&async_file->ev_queue.lock); + uverbs_uobject_put(&async_file->uobj); } -static void ib_uverbs_detach_umcast(struct ib_qp *qp, - struct ib_uqp_object *uobj) +void ib_uverbs_detach_umcast(struct ib_qp *qp, + struct ib_uqp_object *uobj) { struct ib_uverbs_mcast_entry *mcast, *tmp; @@ -185,156 +189,84 @@ static void ib_uverbs_detach_umcast(struct ib_qp *qp, } } -static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, - struct ib_ucontext *context) +static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) { - struct ib_uobject *uobj, *tmp; - - if (!context) - return 0; - - context->closing = 1; - - list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) { - struct ib_ah *ah = uobj->object; - - idr_remove_uobj(&ib_uverbs_ah_idr, uobj); - ib_destroy_ah(ah); - kfree(uobj); - } - - /* Remove MWs before QPs, in order to support type 2A MWs. */ - list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) { - struct ib_mw *mw = uobj->object; - - idr_remove_uobj(&ib_uverbs_mw_idr, uobj); - ib_dealloc_mw(mw); - kfree(uobj); - } - - list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) { - struct ib_qp *qp = uobj->object; - struct ib_uqp_object *uqp = - container_of(uobj, struct ib_uqp_object, uevent.uobject); - - idr_remove_uobj(&ib_uverbs_qp_idr, uobj); - if (qp != qp->real_qp) { - ib_close_qp(qp); - } else { - ib_uverbs_detach_umcast(qp, uqp); - ib_destroy_qp(qp); - } - ib_uverbs_release_uevent(file, &uqp->uevent); - kfree(uqp); - } - - list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) { - struct ib_cq *cq = uobj->object; - struct ib_uverbs_event_file *ev_file = cq->cq_context; - struct ib_ucq_object *ucq = - container_of(uobj, struct ib_ucq_object, uobject); - - idr_remove_uobj(&ib_uverbs_cq_idr, uobj); - ib_destroy_cq(cq); - ib_uverbs_release_ucq(file, ev_file, ucq); - kfree(ucq); - } - - list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { - struct ib_srq *srq = uobj->object; - struct ib_uevent_object *uevent = - container_of(uobj, struct ib_uevent_object, uobject); - - idr_remove_uobj(&ib_uverbs_srq_idr, uobj); - ib_destroy_srq(srq); - ib_uverbs_release_uevent(file, uevent); - kfree(uevent); - } - - list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { - struct ib_mr *mr = uobj->object; - - idr_remove_uobj(&ib_uverbs_mr_idr, uobj); - ib_dereg_mr(mr); - kfree(uobj); - } - - mutex_lock(&file->device->xrcd_tree_mutex); - list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) { - struct ib_xrcd *xrcd = uobj->object; - struct ib_uxrcd_object *uxrcd = - container_of(uobj, struct ib_uxrcd_object, uobject); - - idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); - ib_uverbs_dealloc_xrcd(file->device, xrcd); - kfree(uxrcd); - } - mutex_unlock(&file->device->xrcd_tree_mutex); - - list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { - struct ib_pd *pd = uobj->object; - - idr_remove_uobj(&ib_uverbs_pd_idr, uobj); - ib_dealloc_pd(pd); - kfree(uobj); - } - - return context->device->dealloc_ucontext(context); + complete(&dev->comp); } -static void ib_uverbs_release_file(struct kref *ref) +void ib_uverbs_release_file(struct kref *ref) { struct ib_uverbs_file *file = container_of(ref, struct ib_uverbs_file, ref); - - module_put(file->device->ib_dev->owner); - kref_put(&file->device->ref, ib_uverbs_release_dev); - + struct ib_device *ib_dev; + int srcu_key; + + release_ufile_idr_uobject(file); + + srcu_key = srcu_read_lock(&file->device->disassociate_srcu); + ib_dev = 
srcu_dereference(file->device->ib_dev, + &file->device->disassociate_srcu); + if (ib_dev && !ib_dev->ops.disassociate_ucontext) + module_put(ib_dev->ops.owner); + srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); + + if (refcount_dec_and_test(&file->device->refcount)) + ib_uverbs_comp_dev(file->device); + + if (file->default_async_file) + uverbs_uobject_put(&file->default_async_file->uobj); + put_device(&file->device->dev); + + if (file->disassociate_page) + __free_pages(file->disassociate_page, 0); + mutex_destroy(&file->disassociation_lock); + mutex_destroy(&file->umap_lock); + mutex_destroy(&file->ucontext_lock); kfree(file); } -static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf, - size_t count, loff_t *pos) +static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, + struct file *filp, char __user *buf, + size_t count, loff_t *pos, + size_t eventsz) { - struct ib_uverbs_event_file *file = filp->private_data; struct ib_uverbs_event *event; - int eventsz; int ret = 0; - spin_lock_irq(&file->lock); + spin_lock_irq(&ev_queue->lock); - while (list_empty(&file->event_list)) { - spin_unlock_irq(&file->lock); + while (list_empty(&ev_queue->event_list)) { + if (ev_queue->is_closed) { + spin_unlock_irq(&ev_queue->lock); + return -EIO; + } + spin_unlock_irq(&ev_queue->lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; - if (wait_event_interruptible(file->poll_wait, - !list_empty(&file->event_list))) + if (wait_event_interruptible(ev_queue->poll_wait, + (!list_empty(&ev_queue->event_list) || + ev_queue->is_closed))) return -ERESTARTSYS; - spin_lock_irq(&file->lock); + spin_lock_irq(&ev_queue->lock); } - event = list_entry(file->event_list.next, struct ib_uverbs_event, list); - - if (file->is_async) - eventsz = sizeof (struct ib_uverbs_async_event_desc); - else - eventsz = sizeof (struct ib_uverbs_comp_event_desc); + event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); if (eventsz > count) { ret = -EINVAL; event = NULL; } else { - list_del(file->event_list.next); + list_del(ev_queue->event_list.next); if (event->counter) { ++(*event->counter); list_del(&event->obj_list); } } - spin_unlock_irq(&file->lock); + spin_unlock_irq(&ev_queue->lock); if (event) { if (copy_to_user(buf, event, eventsz)) @@ -348,269 +280,642 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf, return ret; } -static unsigned int ib_uverbs_event_poll(struct file *filp, +static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct ib_uverbs_async_event_file *file = filp->private_data; + + return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos, + sizeof(struct ib_uverbs_async_event_desc)); +} + +static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct ib_uverbs_completion_event_file *comp_ev_file = + filp->private_data; + + return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count, + pos, + sizeof(struct ib_uverbs_comp_event_desc)); +} + +static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, + struct file *filp, struct poll_table_struct *wait) { - unsigned int pollflags = 0; - struct ib_uverbs_event_file *file = filp->private_data; + __poll_t pollflags = 0; - poll_wait(filp, &file->poll_wait, wait); + poll_wait(filp, &ev_queue->poll_wait, wait); - spin_lock_irq(&file->lock); - if (!list_empty(&file->event_list)) - pollflags = POLLIN | POLLRDNORM; - 
spin_unlock_irq(&file->lock); + spin_lock_irq(&ev_queue->lock); + if (!list_empty(&ev_queue->event_list)) + pollflags = EPOLLIN | EPOLLRDNORM; + else if (ev_queue->is_closed) + pollflags = EPOLLERR; + spin_unlock_irq(&ev_queue->lock); return pollflags; } -static int ib_uverbs_event_fasync(int fd, struct file *filp, int on) +static __poll_t ib_uverbs_async_event_poll(struct file *filp, + struct poll_table_struct *wait) { - struct ib_uverbs_event_file *file = filp->private_data; + struct ib_uverbs_async_event_file *file = filp->private_data; - return fasync_helper(fd, filp, on, &file->async_queue); + return ib_uverbs_event_poll(&file->ev_queue, filp, wait); } -static int ib_uverbs_event_close(struct inode *inode, struct file *filp) +static __poll_t ib_uverbs_comp_event_poll(struct file *filp, + struct poll_table_struct *wait) { - struct ib_uverbs_event_file *file = filp->private_data; - struct ib_uverbs_event *entry, *tmp; + struct ib_uverbs_completion_event_file *comp_ev_file = + filp->private_data; - spin_lock_irq(&file->lock); - file->is_closed = 1; - list_for_each_entry_safe(entry, tmp, &file->event_list, list) { - if (entry->counter) - list_del(&entry->obj_list); - kfree(entry); - } - spin_unlock_irq(&file->lock); + return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait); +} - if (file->is_async) { - ib_unregister_event_handler(&file->uverbs_file->event_handler); - kref_put(&file->uverbs_file->ref, ib_uverbs_release_file); - } - kref_put(&file->ref, ib_uverbs_release_event_file); +static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on) +{ + struct ib_uverbs_async_event_file *file = filp->private_data; - return 0; + return fasync_helper(fd, filp, on, &file->ev_queue.async_queue); +} + +static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on) +{ + struct ib_uverbs_completion_event_file *comp_ev_file = + filp->private_data; + + return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue); } -static const struct file_operations uverbs_event_fops = { +const struct file_operations uverbs_event_fops = { + .owner = THIS_MODULE, + .read = ib_uverbs_comp_event_read, + .poll = ib_uverbs_comp_event_poll, + .release = uverbs_uobject_fd_release, + .fasync = ib_uverbs_comp_event_fasync, +}; + +const struct file_operations uverbs_async_event_fops = { .owner = THIS_MODULE, - .read = ib_uverbs_event_read, - .poll = ib_uverbs_event_poll, - .release = ib_uverbs_event_close, - .fasync = ib_uverbs_event_fasync, - .llseek = no_llseek, + .read = ib_uverbs_async_event_read, + .poll = ib_uverbs_async_event_poll, + .release = uverbs_async_event_release, + .fasync = ib_uverbs_async_event_fasync, }; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) { - struct ib_uverbs_event_file *file = cq_context; + struct ib_uverbs_event_queue *ev_queue = cq_context; struct ib_ucq_object *uobj; struct ib_uverbs_event *entry; unsigned long flags; - if (!file) + if (!ev_queue) return; - spin_lock_irqsave(&file->lock, flags); - if (file->is_closed) { - spin_unlock_irqrestore(&file->lock, flags); + spin_lock_irqsave(&ev_queue->lock, flags); + if (ev_queue->is_closed) { + spin_unlock_irqrestore(&ev_queue->lock, flags); return; } - entry = kmalloc(sizeof *entry, GFP_ATOMIC); + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { - spin_unlock_irqrestore(&file->lock, flags); + spin_unlock_irqrestore(&ev_queue->lock, flags); return; } - uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); + uobj = cq->uobject; - entry->desc.comp.cq_handle = 
cq->uobject->user_handle; + entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle; entry->counter = &uobj->comp_events_reported; - list_add_tail(&entry->list, &file->event_list); + list_add_tail(&entry->list, &ev_queue->event_list); list_add_tail(&entry->obj_list, &uobj->comp_list); - spin_unlock_irqrestore(&file->lock, flags); + spin_unlock_irqrestore(&ev_queue->lock, flags); - wake_up_interruptible(&file->poll_wait); - kill_fasync(&file->async_queue, SIGIO, POLL_IN); + wake_up_interruptible(&ev_queue->poll_wait); + kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN); } -static void ib_uverbs_async_handler(struct ib_uverbs_file *file, - __u64 element, __u64 event, - struct list_head *obj_list, - u32 *counter) +void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file, + __u64 element, __u64 event, + struct list_head *obj_list, u32 *counter) { struct ib_uverbs_event *entry; unsigned long flags; - spin_lock_irqsave(&file->async_file->lock, flags); - if (file->async_file->is_closed) { - spin_unlock_irqrestore(&file->async_file->lock, flags); + if (!async_file) + return; + + spin_lock_irqsave(&async_file->ev_queue.lock, flags); + if (async_file->ev_queue.is_closed) { + spin_unlock_irqrestore(&async_file->ev_queue.lock, flags); return; } - entry = kmalloc(sizeof *entry, GFP_ATOMIC); + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { - spin_unlock_irqrestore(&file->async_file->lock, flags); + spin_unlock_irqrestore(&async_file->ev_queue.lock, flags); return; } - entry->desc.async.element = element; + entry->desc.async.element = element; entry->desc.async.event_type = event; - entry->counter = counter; + entry->desc.async.reserved = 0; + entry->counter = counter; - list_add_tail(&entry->list, &file->async_file->event_list); + list_add_tail(&entry->list, &async_file->ev_queue.event_list); if (obj_list) list_add_tail(&entry->obj_list, obj_list); - spin_unlock_irqrestore(&file->async_file->lock, flags); + spin_unlock_irqrestore(&async_file->ev_queue.lock, flags); - wake_up_interruptible(&file->async_file->poll_wait); - kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN); + wake_up_interruptible(&async_file->ev_queue.poll_wait); + kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN); } -void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) +static void uverbs_uobj_event(struct ib_uevent_object *eobj, + struct ib_event *event) { - struct ib_ucq_object *uobj = container_of(event->element.cq->uobject, - struct ib_ucq_object, uobject); + ib_uverbs_async_handler(eobj->event_file, + eobj->uobject.user_handle, event->event, + &eobj->event_list, &eobj->events_reported); +} - ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle, - event->event, &uobj->async_list, - &uobj->async_events_reported); +void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) +{ + uverbs_uobj_event(&event->element.cq->uobject->uevent, event); } void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) { - struct ib_uevent_object *uobj; + /* for XRC target qp's, check that qp is live */ + if (!event->element.qp->uobject) + return; - uobj = container_of(event->element.qp->uobject, - struct ib_uevent_object, uobject); + uverbs_uobj_event(&event->element.qp->uobject->uevent, event); +} - ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, - event->event, &uobj->event_list, - &uobj->events_reported); +void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) +{ + 
uverbs_uobj_event(&event->element.wq->uobject->uevent, event); } void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) { - struct ib_uevent_object *uobj; - - uobj = container_of(event->element.srq->uobject, - struct ib_uevent_object, uobject); - - ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, - event->event, &uobj->event_list, - &uobj->events_reported); + uverbs_uobj_event(&event->element.srq->uobject->uevent, event); } -void ib_uverbs_event_handler(struct ib_event_handler *handler, - struct ib_event *event) +static void ib_uverbs_event_handler(struct ib_event_handler *handler, + struct ib_event *event) { - struct ib_uverbs_file *file = - container_of(handler, struct ib_uverbs_file, event_handler); - - ib_uverbs_async_handler(file, event->element.port_num, event->event, - NULL, NULL); + ib_uverbs_async_handler( + container_of(handler, struct ib_uverbs_async_event_file, + event_handler), + event->element.port_num, event->event, NULL, NULL); } -struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, - int is_async) +void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue) { - struct ib_uverbs_event_file *ev_file; - struct file *filp; + spin_lock_init(&ev_queue->lock); + INIT_LIST_HEAD(&ev_queue->event_list); + init_waitqueue_head(&ev_queue->poll_wait); + ev_queue->is_closed = 0; + ev_queue->async_queue = NULL; +} - ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL); - if (!ev_file) - return ERR_PTR(-ENOMEM); +void ib_uverbs_init_async_event_file( + struct ib_uverbs_async_event_file *async_file) +{ + struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile; + struct ib_device *ib_dev = async_file->uobj.context->device; - kref_init(&ev_file->ref); - spin_lock_init(&ev_file->lock); - INIT_LIST_HEAD(&ev_file->event_list); - init_waitqueue_head(&ev_file->poll_wait); - ev_file->uverbs_file = uverbs_file; - ev_file->async_queue = NULL; - ev_file->is_async = is_async; - ev_file->is_closed = 0; + ib_uverbs_init_event_queue(&async_file->ev_queue); - filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops, - ev_file, O_RDONLY); - if (IS_ERR(filp)) - kfree(ev_file); + /* The first async_event_file becomes the default one for the file. */ + mutex_lock(&uverbs_file->ucontext_lock); + if (!uverbs_file->default_async_file) { + /* Pairs with the put in ib_uverbs_release_file */ + uverbs_uobject_get(&async_file->uobj); + smp_store_release(&uverbs_file->default_async_file, async_file); + } + mutex_unlock(&uverbs_file->ucontext_lock); - return filp; + INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev, + ib_uverbs_event_handler); + ib_register_event_handler(&async_file->event_handler); } -/* - * Look up a completion event file by FD. If lookup is successful, - * takes a ref to the event file struct that it returns; if - * unsuccessful, returns NULL. 
- */ -struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) +static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, + struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count, + const struct uverbs_api_write_method *method_elm) { - struct ib_uverbs_event_file *ev_file = NULL; - struct fd f = fdget(fd); + if (method_elm->is_ex) { + count -= sizeof(*hdr) + sizeof(*ex_hdr); - if (!f.file) - return NULL; + if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count) + return -EINVAL; - if (f.file->f_op != &uverbs_event_fops) - goto out; + if (hdr->in_words * 8 < method_elm->req_size) + return -ENOSPC; - ev_file = f.file->private_data; - if (ev_file->is_async) { - ev_file = NULL; - goto out; + if (ex_hdr->cmd_hdr_reserved) + return -EINVAL; + + if (ex_hdr->response) { + if (!hdr->out_words && !ex_hdr->provider_out_words) + return -EINVAL; + + if (hdr->out_words * 8 < method_elm->resp_size) + return -ENOSPC; + + if (!access_ok(u64_to_user_ptr(ex_hdr->response), + (hdr->out_words + ex_hdr->provider_out_words) * 8)) + return -EFAULT; + } else { + if (hdr->out_words || ex_hdr->provider_out_words) + return -EINVAL; + } + + return 0; } - kref_get(&ev_file->ref); + /* not extended command */ + if (hdr->in_words * 4 != count) + return -EINVAL; -out: - fdput(f); - return ev_file; + if (count < method_elm->req_size + sizeof(*hdr)) { + /* + * rdma-core v18 and v19 have a bug where they send DESTROY_CQ + * with a 16 byte write instead of 24. Old kernels didn't + * check the size so they allowed this. Now that the size is + * checked provide a compatibility work around to not break + * those userspaces. + */ + if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ && + count == 16) { + hdr->in_words = 6; + return 0; + } + return -ENOSPC; + } + if (hdr->out_words * 4 < method_elm->resp_size) + return -ENOSPC; + + return 0; } static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; + const struct uverbs_api_write_method *method_elm; + struct uverbs_api *uapi = file->device->uapi; + struct ib_uverbs_ex_cmd_hdr ex_hdr; struct ib_uverbs_cmd_hdr hdr; + struct uverbs_attr_bundle bundle; + int srcu_key; + ssize_t ret; + + if (!ib_safe_file_access(filp)) { + pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", + task_tgid_vnr(current), current->comm); + return -EACCES; + } - if (count < sizeof hdr) + if (count < sizeof(hdr)) return -EINVAL; - if (copy_from_user(&hdr, buf, sizeof hdr)) + if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; - if (hdr.in_words * 4 != count) - return -EINVAL; + method_elm = uapi_get_method(uapi, hdr.command); + if (IS_ERR(method_elm)) + return PTR_ERR(method_elm); - if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || - !uverbs_cmd_table[hdr.command]) - return -EINVAL; + if (method_elm->is_ex) { + if (count < (sizeof(hdr) + sizeof(ex_hdr))) + return -EINVAL; + if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) + return -EFAULT; + } - if (!file->ucontext && - hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) - return -EINVAL; + ret = verify_hdr(&hdr, &ex_hdr, count, method_elm); + if (ret) + return ret; + + srcu_key = srcu_read_lock(&file->device->disassociate_srcu); + + buf += sizeof(hdr); + + memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); + bundle.ufile = file; + bundle.context = NULL; /* only valid if bundle has uobject */ + bundle.uobject = NULL; + if (!method_elm->is_ex) { + size_t in_len = 
hdr.in_words * 4 - sizeof(hdr); + size_t out_len = hdr.out_words * 4; + u64 response = 0; + + if (method_elm->has_udata) { + bundle.driver_udata.inlen = + in_len - method_elm->req_size; + in_len = method_elm->req_size; + if (bundle.driver_udata.inlen) + bundle.driver_udata.inbuf = buf + in_len; + else + bundle.driver_udata.inbuf = NULL; + } else { + memset(&bundle.driver_udata, 0, + sizeof(bundle.driver_udata)); + } - if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) - return -ENOSYS; + if (method_elm->has_resp) { + /* + * The macros check that if has_resp is set + * then the command request structure starts + * with a '__aligned u64 response' member. + */ + ret = get_user(response, (const u64 __user *)buf); + if (ret) + goto out_unlock; + + if (method_elm->has_udata) { + bundle.driver_udata.outlen = + out_len - method_elm->resp_size; + out_len = method_elm->resp_size; + if (bundle.driver_udata.outlen) + bundle.driver_udata.outbuf = + u64_to_user_ptr(response + + out_len); + else + bundle.driver_udata.outbuf = NULL; + } + } else { + bundle.driver_udata.outlen = 0; + bundle.driver_udata.outbuf = NULL; + } + + ib_uverbs_init_udata_buf_or_null( + &bundle.ucore, buf, u64_to_user_ptr(response), + in_len, out_len); + } else { + buf += sizeof(ex_hdr); + + ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf, + u64_to_user_ptr(ex_hdr.response), + hdr.in_words * 8, hdr.out_words * 8); + + ib_uverbs_init_udata_buf_or_null( + &bundle.driver_udata, buf + bundle.ucore.inlen, + u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen, + ex_hdr.provider_in_words * 8, + ex_hdr.provider_out_words * 8); + + } - return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, - hdr.in_words * 4, hdr.out_words * 4); + ret = method_elm->handler(&bundle); + if (bundle.uobject) + uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true, + !ret, &bundle); +out_unlock: + srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); + return (ret) ? : count; } +static const struct vm_operations_struct rdma_umap_ops; + static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) { struct ib_uverbs_file *file = filp->private_data; + struct ib_ucontext *ucontext; + int ret = 0; + int srcu_key; - if (!file->ucontext) - return -ENODEV; - else - return file->device->ib_dev->mmap(file->ucontext, vma); + srcu_key = srcu_read_lock(&file->device->disassociate_srcu); + ucontext = ib_uverbs_get_ucontext_file(file); + if (IS_ERR(ucontext)) { + ret = PTR_ERR(ucontext); + goto out; + } + + mutex_lock(&file->disassociation_lock); + + vma->vm_ops = &rdma_umap_ops; + ret = ucontext->device->ops.mmap(ucontext, vma); + + mutex_unlock(&file->disassociation_lock); +out: + srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); + return ret; +} + +/* + * The VMA has been dup'd, initialize the vm_private_data with a new tracking + * struct + */ +static void rdma_umap_open(struct vm_area_struct *vma) +{ + struct ib_uverbs_file *ufile = vma->vm_file->private_data; + struct rdma_umap_priv *opriv = vma->vm_private_data; + struct rdma_umap_priv *priv; + + if (!opriv) + return; + + /* We are racing with disassociation */ + if (!down_read_trylock(&ufile->hw_destroy_rwsem)) + goto out_zap; + mutex_lock(&ufile->disassociation_lock); + + /* + * Disassociation already completed, the VMA should already be zapped. 
+ */ + if (!ufile->ucontext) + goto out_unlock; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + goto out_unlock; + rdma_umap_priv_init(priv, vma, opriv->entry); + + mutex_unlock(&ufile->disassociation_lock); + up_read(&ufile->hw_destroy_rwsem); + return; + +out_unlock: + mutex_unlock(&ufile->disassociation_lock); + up_read(&ufile->hw_destroy_rwsem); +out_zap: + /* + * We can't allow the VMA to be created with the actual IO pages, that + * would break our API contract, and it can't be stopped at this + * point, so zap it. + */ + vma->vm_private_data = NULL; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void rdma_umap_close(struct vm_area_struct *vma) +{ + struct ib_uverbs_file *ufile = vma->vm_file->private_data; + struct rdma_umap_priv *priv = vma->vm_private_data; + + if (!priv) + return; + + /* + * The vma holds a reference on the struct file that created it, which + * in turn means that the ib_uverbs_file is guaranteed to exist at + * this point. + */ + mutex_lock(&ufile->umap_lock); + if (priv->entry) + rdma_user_mmap_entry_put(priv->entry); + + list_del(&priv->list); + mutex_unlock(&ufile->umap_lock); + kfree(priv); +} + +/* + * Once the zap_vma_ptes has been called touches to the VMA will come here and + * we return a dummy writable zero page for all the pfns. + */ +static vm_fault_t rdma_umap_fault(struct vm_fault *vmf) +{ + struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data; + struct rdma_umap_priv *priv = vmf->vma->vm_private_data; + vm_fault_t ret = 0; + + if (!priv) + return VM_FAULT_SIGBUS; + + /* Read only pages can just use the system zero page. */ + if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { + vmf->page = ZERO_PAGE(vmf->address); + get_page(vmf->page); + return 0; + } + + mutex_lock(&ufile->umap_lock); + if (!ufile->disassociate_page) + ufile->disassociate_page = + alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0); + + if (ufile->disassociate_page) { + /* + * This VMA is forced to always be shared so this doesn't have + * to worry about COW. + */ + vmf->page = ufile->disassociate_page; + get_page(vmf->page); + } else { + ret = VM_FAULT_SIGBUS; + } + mutex_unlock(&ufile->umap_lock); + + return ret; } +static const struct vm_operations_struct rdma_umap_ops = { + .open = rdma_umap_open, + .close = rdma_umap_close, + .fault = rdma_umap_fault, +}; + +void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) +{ + struct rdma_umap_priv *priv, *next_priv; + + mutex_lock(&ufile->disassociation_lock); + + while (1) { + struct mm_struct *mm = NULL; + + /* Get an arbitrary mm pointer that hasn't been cleaned yet */ + mutex_lock(&ufile->umap_lock); + while (!list_empty(&ufile->umaps)) { + int ret; + + priv = list_first_entry(&ufile->umaps, + struct rdma_umap_priv, list); + mm = priv->vma->vm_mm; + ret = mmget_not_zero(mm); + if (!ret) { + list_del_init(&priv->list); + if (priv->entry) { + rdma_user_mmap_entry_put(priv->entry); + priv->entry = NULL; + } + mm = NULL; + continue; + } + break; + } + mutex_unlock(&ufile->umap_lock); + if (!mm) { + mutex_unlock(&ufile->disassociation_lock); + return; + } + + /* + * The umap_lock is nested under mmap_lock since it used within + * the vma_ops callbacks, so we have to clean the list one mm + * at a time to get the lock ordering right. Typically there + * will only be one mm, so no big deal. 
+ */ + mmap_read_lock(mm); + mutex_lock(&ufile->umap_lock); + list_for_each_entry_safe (priv, next_priv, &ufile->umaps, + list) { + struct vm_area_struct *vma = priv->vma; + + if (vma->vm_mm != mm) + continue; + list_del_init(&priv->list); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + + if (priv->entry) { + rdma_user_mmap_entry_put(priv->entry); + priv->entry = NULL; + } + } + mutex_unlock(&ufile->umap_lock); + mmap_read_unlock(mm); + mmput(mm); + } + + mutex_unlock(&ufile->disassociation_lock); +} + +/** + * rdma_user_mmap_disassociate() - Revoke mmaps for a device + * @device: device to revoke + * + * This function should be called by drivers that need to disable mmaps for the + * device, for instance because it is going to be reset. + */ +void rdma_user_mmap_disassociate(struct ib_device *device) +{ + struct ib_uverbs_device *uverbs_dev = + ib_get_client_data(device, &uverbs_client); + struct ib_uverbs_file *ufile; + + mutex_lock(&uverbs_dev->lists_mutex); + list_for_each_entry(ufile, &uverbs_dev->uverbs_file_list, list) { + if (ufile->ucontext) + uverbs_user_mmap_disassociate(ufile); + } + mutex_unlock(&uverbs_dev->lists_mutex); +} +EXPORT_SYMBOL(rdma_user_mmap_disassociate); + /* * ib_uverbs_open() does not need the BKL: * @@ -625,40 +930,82 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) { struct ib_uverbs_device *dev; struct ib_uverbs_file *file; + struct ib_device *ib_dev; int ret; + int module_dependent; + int srcu_key; dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); - if (dev) - kref_get(&dev->ref); - else + if (!refcount_inc_not_zero(&dev->refcount)) return -ENXIO; - if (!try_module_get(dev->ib_dev->owner)) { - ret = -ENODEV; + get_device(&dev->dev); + srcu_key = srcu_read_lock(&dev->disassociate_srcu); + mutex_lock(&dev->lists_mutex); + ib_dev = srcu_dereference(dev->ib_dev, + &dev->disassociate_srcu); + if (!ib_dev) { + ret = -EIO; goto err; } - file = kmalloc(sizeof *file, GFP_KERNEL); + if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) { + ret = -EPERM; + goto err; + } + + /* In case IB device supports disassociate ucontext, there is no hard + * dependency between uverbs device and its low level device. 
+ */ + module_dependent = !(ib_dev->ops.disassociate_ucontext); + + if (module_dependent) { + if (!try_module_get(ib_dev->ops.owner)) { + ret = -ENODEV; + goto err; + } + } + + file = kzalloc(sizeof(*file), GFP_KERNEL); if (!file) { ret = -ENOMEM; - goto err_module; + if (module_dependent) + goto err_module; + + goto err; } file->device = dev; - file->ucontext = NULL; - file->async_file = NULL; kref_init(&file->ref); - mutex_init(&file->mutex); + mutex_init(&file->ucontext_lock); + + spin_lock_init(&file->uobjects_lock); + INIT_LIST_HEAD(&file->uobjects); + init_rwsem(&file->hw_destroy_rwsem); + mutex_init(&file->umap_lock); + INIT_LIST_HEAD(&file->umaps); + + mutex_init(&file->disassociation_lock); filp->private_data = file; + list_add_tail(&file->list, &dev->uverbs_file_list); + mutex_unlock(&dev->lists_mutex); + srcu_read_unlock(&dev->disassociate_srcu, srcu_key); + + setup_ufile_idr_uobject(file); - return nonseekable_open(inode, filp); + return stream_open(inode, filp); err_module: - module_put(dev->ib_dev->owner); + module_put(ib_dev->ops.owner); err: - kref_put(&dev->ref, ib_uverbs_release_dev); + mutex_unlock(&dev->lists_mutex); + srcu_read_unlock(&dev->disassociate_srcu, srcu_key); + if (refcount_dec_and_test(&dev->refcount)) + ib_uverbs_comp_dev(dev); + + put_device(&dev->dev); return ret; } @@ -666,10 +1013,11 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; - ib_uverbs_cleanup_ucontext(file, file->ucontext); + uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE); - if (file->async_file) - kref_put(&file->async_file->ref, ib_uverbs_release_event_file); + mutex_lock(&file->device->lists_mutex); + list_del_init(&file->list); + mutex_unlock(&file->device->lists_mutex); kref_put(&file->ref, ib_uverbs_release_file); @@ -681,7 +1029,8 @@ static const struct file_operations uverbs_fops = { .write = ib_uverbs_write, .open = ib_uverbs_open, .release = ib_uverbs_close, - .llseek = no_llseek, + .unlocked_ioctl = ib_uverbs_ioctl, + .compat_ioctl = compat_ptr_ioctl, }; static const struct file_operations uverbs_mmap_fops = { @@ -690,216 +1039,300 @@ static const struct file_operations uverbs_mmap_fops = { .mmap = ib_uverbs_mmap, .open = ib_uverbs_open, .release = ib_uverbs_close, - .llseek = no_llseek, + .unlocked_ioctl = ib_uverbs_ioctl, + .compat_ioctl = compat_ptr_ioctl, }; +static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data, + struct ib_client_nl_info *res) +{ + struct ib_uverbs_device *uverbs_dev = client_data; + int ret; + + if (res->port != -1) + return -EINVAL; + + res->abi = ibdev->ops.uverbs_abi_ver; + res->cdev = &uverbs_dev->dev; + + /* + * To support DRIVER_ID binding in userspace some of the driver need + * upgrading to expose their PCI dependent revision information + * through get_context instead of relying on modalias matching. When + * the drivers are fixed they can drop this flag. 
+ */ + if (!ibdev->ops.uverbs_no_driver_id_binding) { + ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID, + ibdev->ops.driver_id); + if (ret) + return ret; + } + return 0; +} + static struct ib_client uverbs_client = { .name = "uverbs", + .no_kverbs_req = true, .add = ib_uverbs_add_one, - .remove = ib_uverbs_remove_one + .remove = ib_uverbs_remove_one, + .get_nl_info = ib_uverbs_get_nl_info, }; +MODULE_ALIAS_RDMA_CLIENT("uverbs"); -static ssize_t show_ibdev(struct device *device, struct device_attribute *attr, +static ssize_t ibdev_show(struct device *device, struct device_attribute *attr, char *buf) { - struct ib_uverbs_device *dev = dev_get_drvdata(device); + struct ib_uverbs_device *dev = + container_of(device, struct ib_uverbs_device, dev); + int ret = -ENODEV; + int srcu_key; + struct ib_device *ib_dev; - if (!dev) - return -ENODEV; + srcu_key = srcu_read_lock(&dev->disassociate_srcu); + ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); + if (ib_dev) + ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev)); + srcu_read_unlock(&dev->disassociate_srcu, srcu_key); - return sprintf(buf, "%s\n", dev->ib_dev->name); + return ret; } -static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); +static DEVICE_ATTR_RO(ibdev); -static ssize_t show_dev_abi_version(struct device *device, - struct device_attribute *attr, char *buf) +static ssize_t abi_version_show(struct device *device, + struct device_attribute *attr, char *buf) { - struct ib_uverbs_device *dev = dev_get_drvdata(device); + struct ib_uverbs_device *dev = + container_of(device, struct ib_uverbs_device, dev); + int ret = -ENODEV; + int srcu_key; + struct ib_device *ib_dev; - if (!dev) - return -ENODEV; + srcu_key = srcu_read_lock(&dev->disassociate_srcu); + ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); + if (ib_dev) + ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver); + srcu_read_unlock(&dev->disassociate_srcu, srcu_key); - return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver); + return ret; } -static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL); +static DEVICE_ATTR_RO(abi_version); + +static struct attribute *ib_dev_attrs[] = { + &dev_attr_abi_version.attr, + &dev_attr_ibdev.attr, + NULL, +}; + +static const struct attribute_group dev_attr_group = { + .attrs = ib_dev_attrs, +}; static CLASS_ATTR_STRING(abi_version, S_IRUGO, __stringify(IB_USER_VERBS_ABI_VERSION)); -static dev_t overflow_maj; -static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES); - -/* - * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by - * requesting a new major number and doubling the number of max devices we - * support. It's stupid, but simple. 
- */ -static int find_overflow_devnum(void) +static int ib_uverbs_create_uapi(struct ib_device *device, + struct ib_uverbs_device *uverbs_dev) { - int ret; - - if (!overflow_maj) { - ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES, - "infiniband_verbs"); - if (ret) { - printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n"); - return ret; - } - } + struct uverbs_api *uapi; - ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES); - if (ret >= IB_UVERBS_MAX_DEVICES) - return -1; + uapi = uverbs_alloc_api(device); + if (IS_ERR(uapi)) + return PTR_ERR(uapi); - return ret; + uverbs_dev->uapi = uapi; + return 0; } -static void ib_uverbs_add_one(struct ib_device *device) +static int ib_uverbs_add_one(struct ib_device *device) { int devnum; dev_t base; struct ib_uverbs_device *uverbs_dev; + int ret; - if (!device->alloc_ucontext) - return; + if (!device->ops.alloc_ucontext || + device->type == RDMA_DEVICE_TYPE_SMI) + return -EOPNOTSUPP; - uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL); + uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL); if (!uverbs_dev) - return; + return -ENOMEM; + + ret = init_srcu_struct(&uverbs_dev->disassociate_srcu); + if (ret) { + kfree(uverbs_dev); + return -ENOMEM; + } - kref_init(&uverbs_dev->ref); + device_initialize(&uverbs_dev->dev); + uverbs_dev->dev.class = &uverbs_class; + uverbs_dev->dev.parent = device->dev.parent; + uverbs_dev->dev.release = ib_uverbs_release_dev; + uverbs_dev->groups[0] = &dev_attr_group; + uverbs_dev->dev.groups = uverbs_dev->groups; + refcount_set(&uverbs_dev->refcount, 1); init_completion(&uverbs_dev->comp); uverbs_dev->xrcd_tree = RB_ROOT; mutex_init(&uverbs_dev->xrcd_tree_mutex); + mutex_init(&uverbs_dev->lists_mutex); + INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list); + rcu_assign_pointer(uverbs_dev->ib_dev, device); + uverbs_dev->num_comp_vectors = device->num_comp_vectors; - spin_lock(&map_lock); - devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); - if (devnum >= IB_UVERBS_MAX_DEVICES) { - spin_unlock(&map_lock); - devnum = find_overflow_devnum(); - if (devnum < 0) - goto err; - - spin_lock(&map_lock); - uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES; - base = devnum + overflow_maj; - set_bit(devnum, overflow_map); - } else { - uverbs_dev->devnum = devnum; - base = devnum + IB_UVERBS_BASE_DEV; - set_bit(devnum, dev_map); + devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1, + GFP_KERNEL); + if (devnum < 0) { + ret = -ENOMEM; + goto err; } - spin_unlock(&map_lock); + uverbs_dev->devnum = devnum; + if (devnum >= IB_UVERBS_NUM_FIXED_MINOR) + base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR; + else + base = IB_UVERBS_BASE_DEV + devnum; - uverbs_dev->ib_dev = device; - uverbs_dev->num_comp_vectors = device->num_comp_vectors; + ret = ib_uverbs_create_uapi(device, uverbs_dev); + if (ret) + goto err_uapi; - cdev_init(&uverbs_dev->cdev, NULL); - uverbs_dev->cdev.owner = THIS_MODULE; - uverbs_dev->cdev.ops = device->mmap ? 
&uverbs_mmap_fops : &uverbs_fops; - kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); - if (cdev_add(&uverbs_dev->cdev, base, 1)) - goto err_cdev; - - uverbs_dev->dev = device_create(uverbs_class, device->dma_device, - uverbs_dev->cdev.dev, uverbs_dev, - "uverbs%d", uverbs_dev->devnum); - if (IS_ERR(uverbs_dev->dev)) - goto err_cdev; - - if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev)) - goto err_class; - if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) - goto err_class; + uverbs_dev->dev.devt = base; + dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum); - ib_set_client_data(device, &uverbs_client, uverbs_dev); - - return; + cdev_init(&uverbs_dev->cdev, + device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops); + uverbs_dev->cdev.owner = THIS_MODULE; -err_class: - device_destroy(uverbs_class, uverbs_dev->cdev.dev); + ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev); + if (ret) + goto err_uapi; -err_cdev: - cdev_del(&uverbs_dev->cdev); - if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) - clear_bit(devnum, dev_map); - else - clear_bit(devnum, overflow_map); + ib_set_client_data(device, &uverbs_client, uverbs_dev); + return 0; +err_uapi: + ida_free(&uverbs_ida, devnum); err: - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); + if (refcount_dec_and_test(&uverbs_dev->refcount)) + ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); - kfree(uverbs_dev); - return; + put_device(&uverbs_dev->dev); + return ret; } -static void ib_uverbs_remove_one(struct ib_device *device) +static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, + struct ib_device *ib_dev) { - struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client); + struct ib_uverbs_file *file; - if (!uverbs_dev) - return; + /* Pending running commands to terminate */ + uverbs_disassociate_api_pre(uverbs_dev); - dev_set_drvdata(uverbs_dev->dev, NULL); - device_destroy(uverbs_class, uverbs_dev->cdev.dev); - cdev_del(&uverbs_dev->cdev); + mutex_lock(&uverbs_dev->lists_mutex); + while (!list_empty(&uverbs_dev->uverbs_file_list)) { + file = list_first_entry(&uverbs_dev->uverbs_file_list, + struct ib_uverbs_file, list); + list_del_init(&file->list); + kref_get(&file->ref); - if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) - clear_bit(uverbs_dev->devnum, dev_map); - else - clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); + /* We must release the mutex before going ahead and calling + * uverbs_cleanup_ufile, as it might end up indirectly calling + * uverbs_close, for example due to freeing the resources (e.g + * mmput). 
+ */ + mutex_unlock(&uverbs_dev->lists_mutex); - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); - wait_for_completion(&uverbs_dev->comp); - kfree(uverbs_dev); + uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE); + kref_put(&file->ref, ib_uverbs_release_file); + + mutex_lock(&uverbs_dev->lists_mutex); + } + mutex_unlock(&uverbs_dev->lists_mutex); + + uverbs_disassociate_api(uverbs_dev->uapi); } -static char *uverbs_devnode(struct device *dev, umode_t *mode) +static void ib_uverbs_remove_one(struct ib_device *device, void *client_data) { - if (mode) - *mode = 0666; - return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); + struct ib_uverbs_device *uverbs_dev = client_data; + int wait_clients = 1; + + cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev); + ida_free(&uverbs_ida, uverbs_dev->devnum); + + if (device->ops.disassociate_ucontext) { + /* We disassociate HW resources and immediately return. + * Userspace will see a EIO errno for all future access. + * Upon returning, ib_device may be freed internally and is not + * valid any more. + * uverbs_device is still available until all clients close + * their files, then the uverbs device ref count will be zero + * and its resources will be freed. + * Note: At this point no more files can be opened since the + * cdev was deleted, however active clients can still issue + * commands and close their open files. + */ + ib_uverbs_free_hw_resources(uverbs_dev, device); + wait_clients = 0; + } + + if (refcount_dec_and_test(&uverbs_dev->refcount)) + ib_uverbs_comp_dev(uverbs_dev); + if (wait_clients) + wait_for_completion(&uverbs_dev->comp); + + put_device(&uverbs_dev->dev); } static int __init ib_uverbs_init(void) { int ret; - ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, + ret = register_chrdev_region(IB_UVERBS_BASE_DEV, + IB_UVERBS_NUM_FIXED_MINOR, "infiniband_verbs"); if (ret) { - printk(KERN_ERR "user_verbs: couldn't register device number\n"); + pr_err("user_verbs: couldn't register device number\n"); goto out; } - uverbs_class = class_create(THIS_MODULE, "infiniband_verbs"); - if (IS_ERR(uverbs_class)) { - ret = PTR_ERR(uverbs_class); - printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n"); - goto out_chrdev; + ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0, + IB_UVERBS_NUM_DYNAMIC_MINOR, + "infiniband_verbs"); + if (ret) { + pr_err("couldn't register dynamic device number\n"); + goto out_alloc; } - uverbs_class->devnode = uverbs_devnode; + ret = class_register(&uverbs_class); + if (ret) { + pr_err("user_verbs: couldn't create class infiniband_verbs\n"); + goto out_chrdev; + } - ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); + ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr); if (ret) { - printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); + pr_err("user_verbs: couldn't create abi_version attribute\n"); goto out_class; } ret = ib_register_client(&uverbs_client); if (ret) { - printk(KERN_ERR "user_verbs: couldn't register client\n"); + pr_err("user_verbs: couldn't register client\n"); goto out_class; } return 0; out_class: - class_destroy(uverbs_class); + class_unregister(&uverbs_class); out_chrdev: - unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); + unregister_chrdev_region(dynamic_uverbs_dev, + IB_UVERBS_NUM_DYNAMIC_MINOR); + +out_alloc: + unregister_chrdev_region(IB_UVERBS_BASE_DEV, + IB_UVERBS_NUM_FIXED_MINOR); out: return ret; @@ -908,17 +1341,13 @@ out: static void __exit 
ib_uverbs_cleanup(void) { ib_unregister_client(&uverbs_client); - class_destroy(uverbs_class); - unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); - if (overflow_maj) - unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES); - idr_destroy(&ib_uverbs_pd_idr); - idr_destroy(&ib_uverbs_mr_idr); - idr_destroy(&ib_uverbs_mw_idr); - idr_destroy(&ib_uverbs_ah_idr); - idr_destroy(&ib_uverbs_cq_idr); - idr_destroy(&ib_uverbs_qp_idr); - idr_destroy(&ib_uverbs_srq_idr); + class_unregister(&uverbs_class); + unregister_chrdev_region(IB_UVERBS_BASE_DEV, + IB_UVERBS_NUM_FIXED_MINOR); + unregister_chrdev_region(dynamic_uverbs_dev, + IB_UVERBS_NUM_DYNAMIC_MINOR); + ib_cleanup_ucaps(); + mmu_notifier_synchronize(); } module_init(ib_uverbs_init); |
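
A note on the device-numbering change in this patch: the old bitmap-plus-overflow-major scheme is replaced by an IDA and a split minor space. The first IB_UVERBS_NUM_FIXED_MINOR (32) devices keep the historical fixed region at MKDEV(231, 192), and every devnum past that comes out of a region reserved with alloc_chrdev_region() at module init. Below is a minimal userspace sketch of that mapping; MKDEV() is re-implemented locally, uverbs_devt() is a hypothetical helper mirroring the logic in ib_uverbs_add_one(), and DYNAMIC_BASE stands in for whatever alloc_chrdev_region() happened to return — all assumptions for illustration, not kernel ABI.

/*
 * Sketch of the devnum -> dev_t selection in ib_uverbs_add_one()
 * after this patch.  MKDEV() is redefined locally (MINORBITS == 20)
 * and DYNAMIC_BASE is an arbitrary illustrative value.
 */
#include <stdio.h>

#define MKDEV(ma, mi)              (((unsigned int)(ma) << 20) | (mi))
#define IB_UVERBS_MAJOR            231
#define IB_UVERBS_BASE_MINOR       192
#define IB_UVERBS_NUM_FIXED_MINOR  32
#define IB_UVERBS_BASE_DEV         MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
#define DYNAMIC_BASE               MKDEV(511, 0)   /* illustrative only */

/* Hypothetical helper mirroring the base/devnum arithmetic. */
static unsigned int uverbs_devt(int devnum)
{
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		return DYNAMIC_BASE + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	return IB_UVERBS_BASE_DEV + devnum;
}

int main(void)
{
	/* devnums 0..31 land in the fixed region, 32 and up in the dynamic one */
	printf("devnum 0:  %#x\n", uverbs_devt(0));   /* 231:192 */
	printf("devnum 31: %#x\n", uverbs_devt(31));  /* 231:223, last fixed minor */
	printf("devnum 32: %#x\n", uverbs_devt(32));  /* first dynamic minor */
	return 0;
}

With RDMA_MAX_PORTS devices now supported, only the overflow past 32 pays the cost of a second chrdev region, and existing udev rules keyed on the fixed 231:192 range keep working.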

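The sizing rules that verify_hdr() centralizes are worth spelling out. For legacy (non-extended) commands: in_words * 4 must equal the write() size exactly, the request must cover the method's req_size (with a one-off allowance for the 16-byte DESTROY_CQ that rdma-core v18 and v19 shipped instead of 24 bytes), and out_words must cover resp_size. A standalone sketch of just that branch, under the assumption that req_size/resp_size are passed in directly in place of the uverbs_api_write_method fields; verify_legacy_hdr() is a hypothetical name, and the IB_USER_VERBS_CMD_DESTROY_CQ value is hard-coded from the uverbs ABI header so this builds outside the kernel:

/*
 * Userspace sketch of the legacy-command branch of verify_hdr(),
 * including the rdma-core v18/v19 DESTROY_CQ compatibility path.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct ib_uverbs_cmd_hdr {
	uint32_t command;
	uint16_t in_words;	/* request length in 4-byte words, incl. header */
	uint16_t out_words;	/* response length in 4-byte words */
};

/* Enum value from <rdma/ib_user_verbs.h>, hard-coded for a standalone build. */
#define IB_USER_VERBS_CMD_DESTROY_CQ 20

static int verify_legacy_hdr(struct ib_uverbs_cmd_hdr *hdr, size_t count,
			     size_t req_size, size_t resp_size)
{
	/* The declared request length must match the write() size exactly. */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < req_size + sizeof(*hdr)) {
		/* rdma-core v18/v19 sent DESTROY_CQ as 16 bytes, not 24 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;	/* widen to the real layout */
			return 0;
		}
		return -ENOSPC;
	}

	/* The user's response buffer must be large enough for the reply. */
	if (hdr->out_words * 4 < resp_size)
		return -ENOSPC;

	return 0;
}

int main(void)
{
	struct ib_uverbs_cmd_hdr hdr = {
		.command = IB_USER_VERBS_CMD_DESTROY_CQ,
		.in_words = 4,		/* 16 bytes total: the buggy layout */
		.out_words = 2,
	};

	/* Accepted via the compat path; in_words is patched up to 6. */
	printf("ret=%d in_words=%u\n",
	       verify_legacy_hdr(&hdr, 16, 16, 8), hdr.in_words);
	return 0;
}

Run as-is this prints ret=0 in_words=6: the short request is accepted and silently widened to the 24-byte layout, which is exactly how the patch avoids breaking those two released rdma-core versions while still enforcing sizes for everything else.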