author     Bob Pearson <rpearsonhpe@gmail.com>      2021-09-14 11:42:03 -0500
committer  Jason Gunthorpe <jgg@nvidia.com>         2021-09-24 10:14:59 -0300
commit     ae6e843fe08d0ea8e158815809dcc20e3a1afc22 (patch)
tree       723ed25b8a0edfde968f72ace5f837daf880e61d /drivers/infiniband/sw/rxe/rxe_cq.c
parent     6bda39149d4b8920fdb8744090653aca3daa792d (diff)
RDMA/rxe: Add memory barriers to kernel queues
Earlier patches added memory barriers to protect user space to kernel space communications. The user space queues were previously shown to have occasional memory synchronization errors, which were removed by adding smp_load_acquire() and smp_store_release() barriers. This patch extends that to the case where queues are used between kernel space threads.

This patch also extends the queue types to include kernel ULP queues, which access the other end of the queues in kernel verbs calls like poll_cq and post_send/recv.

Link: https://lore.kernel.org/r/20210914164206.19768-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
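For context, the acquire/release pairing this series relies on looks roughly like the single-producer/single-consumer sketch below. The names (demo_queue, demo_elem, demo_post, demo_poll) are made up for illustration; this is not the rxe_queue implementation, only the pattern: the producer publishes the new producer index with smp_store_release() after writing the element, and the consumer reads that index with smp_load_acquire() before touching the element.

#include <linux/types.h>
#include <asm/barrier.h>

struct demo_elem {
	u64 wr_id;
	u32 status;
};

struct demo_queue {
	u32 producer_index;		/* written only by the producer */
	u32 consumer_index;		/* written only by the consumer */
	u32 index_mask;			/* ring size - 1, size is a power of two */
	struct demo_elem elem[];
};

static void demo_post(struct demo_queue *q, const struct demo_elem *e)
{
	u32 prod = q->producer_index;

	/* caller has already checked that the queue is not full */
	q->elem[prod & q->index_mask] = *e;

	/* publish the element before the new index becomes visible */
	smp_store_release(&q->producer_index, prod + 1);
}

static bool demo_poll(struct demo_queue *q, struct demo_elem *e)
{
	u32 cons = q->consumer_index;
	/* pairs with the smp_store_release() in demo_post() */
	u32 prod = smp_load_acquire(&q->producer_index);

	if (cons == prod)
		return false;		/* queue is empty */

	*e = q->elem[cons & q->index_mask];

	/* let the producer reuse the slot only after the copy is done */
	smp_store_release(&q->consumer_index, cons + 1);
	return true;
}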
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_cq.c')
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_cq.c   25
1 file changed, 5 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index aef288f164fd..4eedaa0244b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -25,11 +25,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cq) {
- if (cq->is_user)
- count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
- else
- count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
-
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
pr_warn("cqe(%d) < current # elements in queue (%d)",
cqe, count);
@@ -65,7 +61,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int err;
enum queue_type type;
- type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_TO_CLIENT;
cq->queue = rxe_queue_init(rxe, &cqe,
sizeof(struct rxe_cqe), type);
if (!cq->queue) {
@@ -117,11 +113,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
spin_lock_irqsave(&cq->cq_lock, flags);
- if (cq->is_user)
- full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
- else
- full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
-
+ full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
@@ -134,17 +126,10 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
return -EBUSY;
}
- if (cq->is_user)
- addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
- else
- addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
-
+ addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
memcpy(addr, cqe, sizeof(*cqe));
- if (cq->is_user)
- advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
- else
- advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
spin_unlock_irqrestore(&cq->cq_lock, flags);
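The hunks above drop the cq->is_user branches because, after this series, each queue helper takes a queue type and decides internally which index belongs to the other end of the queue. Below is a hedged sketch of that idea, reusing the made-up demo_queue above; the enum and demo_queue_full() helper are hypothetical illustrations, not the rxe_queue.h API.

enum demo_queue_type {
	DEMO_QUEUE_TO_CLIENT,	/* we produce, the client (user or kernel ULP) consumes */
	DEMO_QUEUE_FROM_CLIENT,	/* the client produces, we consume */
};

static bool demo_queue_full(struct demo_queue *q, enum demo_queue_type type)
{
	u32 prod, cons;

	if (type == DEMO_QUEUE_TO_CLIENT) {
		/* we own the producer index; the consumer index is remote */
		prod = q->producer_index;
		cons = smp_load_acquire(&q->consumer_index);
	} else {
		/* the producer index is remote; we own the consumer index */
		prod = smp_load_acquire(&q->producer_index);
		cons = q->consumer_index;
	}

	/* power-of-two ring with index_mask + 1 slots */
	return (prod - cons) > q->index_mask;
}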