author	Cheng Xu <chengyou@linux.alibaba.com>	2022-11-07 10:18:45 +0800
committer	Leon Romanovsky <leon@kernel.org>	2022-11-07 09:55:04 +0200
commit	0ca9c2e2844aa285c3656a29d4803839cfa8bca9 (patch)
tree	3d39727b3a47a7c72743c809e29ee17c77fe0429 /drivers/infiniband/hw/erdma/erdma_qp.c
parent	71c6925f280ae8cb52eafee2404ae75c176c28ba (diff)
RDMA/erdma: Implement atomic operations support
Add atomic operations support to the post_send and poll_cq implementations. Also, rename 'laddr' and 'lkey' in struct erdma_sge to 'addr' and 'key', because this structure is used for both local and remote SGEs.

Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Link: https://lore.kernel.org/r/20221107021845.44598-4-chengyou@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
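For context, a minimal sketch (not part of this patch) of how a kernel ULP might build the fetch-and-add work request that reaches this new path, using the standard ib_verbs atomic WR API; the qp, local_mr, local_dma_addr, remote_addr and remote_rkey names are placeholders for whatever the caller has already set up:

#include <rdma/ib_verbs.h>

/* Post a 64-bit fetch-and-add against a remote address; the original
 * remote value is written back into the 8-byte local buffer described
 * by the single SGE. */
static int post_fetch_add(struct ib_qp *qp, struct ib_mr *local_mr,
			  u64 local_dma_addr, u64 remote_addr, u32 remote_rkey)
{
	const struct ib_send_wr *bad_wr;
	struct ib_sge sge = {
		.addr   = local_dma_addr,	/* DMA address of 8-byte result buffer */
		.length = 8,			/* RDMA atomics operate on 64-bit operands */
		.lkey   = local_mr->lkey,
	};
	struct ib_atomic_wr wr = {
		.wr.opcode	= IB_WR_ATOMIC_FETCH_AND_ADD,
		.wr.send_flags	= IB_SEND_SIGNALED,
		.wr.sg_list	= &sge,
		.wr.num_sge	= 1,
		.remote_addr	= remote_addr,	/* must be 8-byte aligned */
		.compare_add	= 1,		/* value to add to the remote operand */
		.rkey		= remote_rkey,
	};

	return ib_post_send(qp, &wr.wr, &bad_wr);
}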
Diffstat (limited to 'drivers/infiniband/hw/erdma/erdma_qp.c')
-rw-r--r--	drivers/infiniband/hw/erdma/erdma_qp.c	40
1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index c7f343173cb9..521e97258de7 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -285,15 +285,16 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
u32 idx = *pi & (qp->attrs.sq_size - 1);
enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_atomic_sqe *atomic_sqe;
struct erdma_readreq_sqe *read_sqe;
struct erdma_reg_mr_sqe *regmr_sge;
struct erdma_write_sqe *write_sqe;
struct erdma_send_sqe *send_sqe;
struct ib_rdma_wr *rdma_wr;
- struct erdma_mr *mr;
+ struct erdma_sge *sge;
__le32 *length_field;
+ struct erdma_mr *mr;
u64 wqe_hdr, *entry;
- struct ib_sge *sge;
u32 attrs;
int ret;
@@ -360,9 +361,9 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
qp->attrs.sq_size, SQEBB_SHIFT);
- sge->addr = rdma_wr->remote_addr;
- sge->lkey = rdma_wr->rkey;
- sge->length = send_wr->sg_list[0].length;
+ sge->addr = cpu_to_le64(rdma_wr->remote_addr);
+ sge->key = cpu_to_le32(rdma_wr->rkey);
+ sge->length = cpu_to_le32(send_wr->sg_list[0].length);
wqe_size = sizeof(struct erdma_readreq_sqe) +
send_wr->num_sge * sizeof(struct ib_sge);
@@ -423,6 +424,35 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
wqe_size = sizeof(struct erdma_reg_mr_sqe);
goto out;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ atomic_sqe = (struct erdma_atomic_sqe *)entry;
+ if (op == IB_WR_ATOMIC_CMP_AND_SWP) {
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_ATOMIC_CAS);
+ atomic_sqe->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(send_wr)->swap);
+ atomic_sqe->cmp_data =
+ cpu_to_le64(atomic_wr(send_wr)->compare_add);
+ } else {
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_ATOMIC_FAD);
+ atomic_sqe->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(send_wr)->compare_add);
+ }
+
+ sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ sge->addr = cpu_to_le64(atomic_wr(send_wr)->remote_addr);
+ sge->key = cpu_to_le32(atomic_wr(send_wr)->rkey);
+ sge++;
+
+ sge->addr = cpu_to_le64(send_wr->sg_list[0].addr);
+ sge->key = cpu_to_le32(send_wr->sg_list[0].lkey);
+ sge->length = cpu_to_le32(send_wr->sg_list[0].length);
+
+ wqe_size = sizeof(*atomic_sqe);
+ goto out;
default:
return -EOPNOTSUPP;
}
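A note on the layout implied by the atomic case above (inferred from this hunk only; the authoritative struct definitions live in erdma_hw.h and any layout beyond the fields the code touches is an assumption): the atomic SQE carries the opcode plus the swap/add and compare operands, while the queue entry at idx + 1 is filled with two consecutive erdma_sge entries, the first describing the remote operand (remote_addr/rkey) and the second the local buffer from sg_list[0] that receives the original remote value. In rough C terms:

/* Sketch of the on-ring shape erdma_push_one_sqe() produces for atomic
 * WRs; field grouping is illustrative, not the erdma_hw.h definition. */
struct erdma_atomic_wqe_sketch {
	struct erdma_atomic_sqe sqe;	/* hdr, fetchadd_swap_data, cmp_data */
	struct erdma_sge remote;	/* atomic_wr(wr)->remote_addr + rkey */
	struct erdma_sge local;		/* sg_list[0]: addr, key, length of result buffer */
};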