author     Jason Gunthorpe <jgg@mellanox.com>  2020-01-08 19:21:54 +0200
committer  Jason Gunthorpe <jgg@mellanox.com>  2020-01-13 16:17:19 -0400
commit     f7c8416ccea52b41e29227b3a5066540f51ee471
tree       4d18cf6136d18567f13180fa7a8e196be866ef97  /drivers/infiniband/core/uverbs_std_types.c
parent     6898d1c661d79f4707d8ba82991b2195822780ca
RDMA/core: Simplify destruction of FD uobjects
FD uobjects have a weird split between the struct file and uobject world. Simplify this to make them pure uobjects and use a generic release method for all struct file operations.

This fixes the control flow so that mlx5_cmd_cleanup_async_ctx() is always called before erasing the linked list contents, to make the concurrency simpler to understand.

For this to work the uobject destruction must fence anything that it is cleaning up - the design must not rely on struct file lifetime.

Only deliver_event() relies on the struct file when adding new events to the queue; add an is_destroyed check under lock to block it.

Link: https://lore.kernel.org/r/1578504126-9400-3-git-send-email-yishaih@mellanox.com
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
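The ordering constraint described above (once uobject destruction has begun, no new events may be queued, and the destroy path drains whatever is already queued) can be modeled in plain C. The sketch below is not the kernel code touched by this series; it is a minimal userspace analogue with hypothetical names (event_queue, deliver_event, destroy_queue) showing the is_destroyed-check-under-lock pattern the message refers to.

/*
 * Minimal userspace model of the "is_destroyed check under lock" pattern.
 * All names here are hypothetical; the real kernel code uses its own
 * types, spinlocks, and list helpers.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int data;
};

struct event_queue {
	pthread_mutex_t lock;
	bool is_destroyed;	/* set once the destroy path has started */
	struct event *head;
};

/* Producer side: refuse to queue new events once destruction has begun. */
static int deliver_event(struct event_queue *q, int data)
{
	struct event *ev;
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	if (q->is_destroyed) {
		ret = -1;		/* destroy already fenced the queue */
	} else {
		ev = malloc(sizeof(*ev));
		if (!ev) {
			ret = -1;
		} else {
			ev->data = data;
			ev->next = q->head;
			q->head = ev;
		}
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/* Destroy side: mark the queue dead, then drain it. */
static void destroy_queue(struct event_queue *q)
{
	struct event *ev, *next;

	pthread_mutex_lock(&q->lock);
	q->is_destroyed = true;
	ev = q->head;
	q->head = NULL;
	pthread_mutex_unlock(&q->lock);

	/* No new entries can appear after is_destroyed was set under lock. */
	for (; ev; ev = next) {
		next = ev->next;
		free(ev);
	}
}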
Diffstat (limited to 'drivers/infiniband/core/uverbs_std_types.c')
-rw-r--r--  drivers/infiniband/core/uverbs_std_types.c  23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 35b2e2c640cc..def038a0fe77 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -202,22 +202,29 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
 	return 0;
 }
 
-static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
-						   enum rdma_remove_reason why)
+static int
+uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
+					  enum rdma_remove_reason why)
 {
-	struct ib_uverbs_completion_event_file *comp_event_file =
+	struct ib_uverbs_completion_event_file *file =
 		container_of(uobj, struct ib_uverbs_completion_event_file,
 			     uobj);
-	struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
+	struct ib_uverbs_event_queue *event_queue = &file->ev_queue;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&event_queue->lock);
 	event_queue->is_closed = 1;
 	spin_unlock_irq(&event_queue->lock);
+	wake_up_interruptible(&event_queue->poll_wait);
+	kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
 
-	if (why == RDMA_REMOVE_DRIVER_REMOVE) {
-		wake_up_interruptible(&event_queue->poll_wait);
-		kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
+	spin_lock_irq(&event_queue->lock);
+	list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
+		if (entry->counter)
+			list_del(&entry->obj_list);
+		kfree(entry);
 	}
+	spin_unlock_irq(&event_queue->lock);
 	return 0;
 };
 
@@ -230,7 +237,7 @@ EXPORT_SYMBOL(uverbs_destroy_def_handler);
 DECLARE_UVERBS_NAMED_OBJECT(
 	UVERBS_OBJECT_COMP_CHANNEL,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
-			     uverbs_hot_unplug_completion_event_file,
+			     uverbs_completion_event_file_destroy_uobj,
 			     &uverbs_event_fops,
 			     "[infinibandevent]",
 			     O_RDONLY));