Diffstat (limited to 'drivers/infiniband/hw/irdma/uk.c')
 drivers/infiniband/hw/irdma/uk.c | 1004 ++++++++++++++++++++++++-----------
 1 file changed, 625 insertions(+), 379 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index a6d52c20091c..f0846b800913 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"
@@ -13,16 +12,16 @@
* @sge: sge length and stag
* @valid: The wqe valid
*/
-static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + 8,
FIELD_PREP(IRDMAQPSQ_VALID, valid) |
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + 8,
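
The hunk above swaps the driver-private struct irdma_sge for the core struct ib_sge (rdma/ib_verbs.h), renaming tag_off/len/stag to addr/length/lkey. A minimal kernel-style sketch of the correspondence, assuming the old layout implied by the deleted lines (old_irdma_sge is a hypothetical reconstruction, not a real symbol):

    #include <rdma/ib_verbs.h>

    /* Hypothetical reconstruction of the removed driver-private type. */
    struct old_irdma_sge {
    	u64 tag_off;	/* -> ib_sge.addr   (tagged offset / VA)  */
    	u32 len;	/* -> ib_sge.length (fragment byte count) */
    	u32 stag;	/* -> ib_sge.lkey   (steering tag)        */
    };

    static inline void old_sge_to_ib_sge(const struct old_irdma_sge *old,
    					 struct ib_sge *sge)
    {
    	sge->addr = old->tag_off;
    	sge->length = old->len;
    	sge->lkey = old->stag;
    }
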
@@ -38,14 +37,14 @@ static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
* @valid: wqe valid flag
*/
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
- struct irdma_sge *sge, u8 valid)
+ struct ib_sge *sge, u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + 8, 0);
@@ -56,7 +55,7 @@ static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
* irdma_nop_1 - insert a NOP wqe
* @qp: hw qp ptr
*/
-static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
+static int irdma_nop_1(struct irdma_qp_uk *qp)
{
u64 hdr;
__le64 *wqe;
@@ -64,7 +63,7 @@ static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
bool signaled = false;
if (!qp->sq_ring.head)
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
wqe = qp->sq_base[wqe_idx].elem;
@@ -94,16 +93,18 @@ static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
*/
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
- __le64 *wqe;
+ struct irdma_qp_quanta *sq;
u32 wqe_idx;
if (!(qp_wqe_idx & 0x7F)) {
wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
- wqe = qp->sq_base[wqe_idx].elem;
+ sq = qp->sq_base + wqe_idx;
if (wqe_idx)
- memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
+ memset(sq, qp->swqe_polarity ? 0 : 0xFF,
+ 128 * sizeof(*sq));
else
- memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
+ memset(sq, qp->swqe_polarity ? 0xFF : 0,
+ 128 * sizeof(*sq));
}
}
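
The rewritten irdma_clr_wqes clears the same region as before: 128 quanta of 32 bytes each is exactly the old 0x1000 literal, but sizing off the quantum type is self-documenting. A runnable check of that arithmetic, assuming struct irdma_qp_quanta wraps one 32-byte quantum:

    #include <stdint.h>

    /* Assumed stand-in for struct irdma_qp_quanta: one 32-byte WQE quantum. */
    struct qp_quanta { uint64_t elem[4]; };

    int main(void)
    {
    	/* 128 quanta x 32 B == 4096 B == the old 0x1000 literal */
    	_Static_assert(128 * sizeof(struct qp_quanta) == 0x1000,
    		       "quantum size changed");
    	return 0;
    }
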
@@ -113,68 +114,8 @@ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
*/
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
- u64 temp;
- u32 hw_sq_tail;
- u32 sw_sq_head;
-
- /* valid bit is written and loads completed before reading shadow */
- mb();
-
- /* read the doorbell shadow area */
- get_64bit_val(qp->shadow_area, 0, &temp);
-
- hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
- sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
- if (sw_sq_head != qp->initial_ring.head) {
- if (qp->push_dropped) {
- writel(qp->qp_id, qp->wqe_alloc_db);
- qp->push_dropped = false;
- } else if (sw_sq_head != hw_sq_tail) {
- if (sw_sq_head > qp->initial_ring.head) {
- if (hw_sq_tail >= qp->initial_ring.head &&
- hw_sq_tail < sw_sq_head)
- writel(qp->qp_id, qp->wqe_alloc_db);
- } else {
- if (hw_sq_tail >= qp->initial_ring.head ||
- hw_sq_tail < sw_sq_head)
- writel(qp->qp_id, qp->wqe_alloc_db);
- }
- }
- }
-
- qp->initial_ring.head = qp->sq_ring.head;
-}
-
-/**
- * irdma_qp_ring_push_db - ring qp doorbell
- * @qp: hw qp ptr
- * @wqe_idx: wqe index
- */
-static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
-{
- set_32bit_val(qp->push_db, 0,
- FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
- qp->initial_ring.head = qp->sq_ring.head;
- qp->push_mode = true;
- qp->push_dropped = false;
-}
-
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq)
-{
- __le64 *push;
-
- if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
- IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
- !qp->push_mode) {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- } else {
- push = (__le64 *)((uintptr_t)qp->push_wqe +
- (wqe_idx & 0x7) * 0x20);
- memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
- irdma_qp_ring_push_db(qp, wqe_idx);
- }
+ dma_wmb();
+ writel(qp->qp_id, qp->wqe_alloc_db);
}
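
With push mode removed, posting collapses to the canonical MMIO doorbell pattern: a dma_wmb() orders all WQE stores ahead of the doorbell, then a single 32-bit write of the QP id wakes the hardware. A kernel-style sketch of that pattern (ring_doorbell() is illustrative, not an irdma symbol):

    #include <linux/io.h>

    static void ring_doorbell(u32 qp_id, void __iomem *wqe_alloc_db)
    {
    	dma_wmb();			/* WQE stores visible first */
    	writel(qp_id, wqe_alloc_db);	/* one MMIO write rings HW  */
    }
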
/**
@@ -191,7 +132,6 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
{
__le64 *wqe;
__le64 *wqe_0 = NULL;
- u32 nop_wqe_idx;
u16 avail_quanta;
u16 i;
@@ -208,14 +148,10 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
- nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
for (i = 0; i < avail_quanta; i++) {
irdma_nop_1(qp);
IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
}
- if (qp->push_db && info->push_wqe)
- irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
- avail_quanta, nop_wqe_idx, true);
}
*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
@@ -233,6 +169,27 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+ qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
+
+ return wqe;
+}
+
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
+{
+ int ret_code;
+ __le64 *wqe;
+
+ if (IRDMA_RING_FULL_ERR(srq->srq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ srq->srwqe_polarity = !srq->srwqe_polarity;
+ /* wqe_size_multiplier is the number of 32-byte quanta in one srq wqe */
+ wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
return wqe;
}
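
irdma_srq_get_next_recv_wqe flips srwqe_polarity every time the returned index wraps to slot 0, so hardware can tell fresh WQEs from stale ones without the ring being zeroed. A small runnable simulation of the wrap-and-flip, assuming the head move hands back the pre-increment index as the code above suggests:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int size = 4, head = 0, polarity = 0;

    	for (int post = 0; post < 10; post++) {
    		unsigned int idx = head;	/* index handed to caller */

    		head = (head + 1) % size;	/* ring head move */
    		if (!idx)
    			polarity ^= 1;		/* flip each lap  */
    		printf("post %d -> slot %u, polarity %u\n",
    		       post, idx, polarity);
    	}
    	return 0;
    }
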
@@ -245,7 +202,7 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
__le64 *wqe;
- enum irdma_status_code ret_code;
+ int ret_code;
if (IRDMA_RING_FULL_ERR(qp->rq_ring))
return NULL;
@@ -268,28 +225,25 @@ __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
{
u64 hdr;
__le64 *wqe;
struct irdma_rdma_write *op_info;
u32 i, wqe_idx;
u32 total_size = 0, byte_off;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 frag_cnt, addl_frag_cnt;
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
read_fence |= info->read_fence;
@@ -305,12 +259,12 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
if (info->imm_data_valid) {
set_64bit_val(wqe, 0,
@@ -339,12 +293,11 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
++addl_frag_cnt;
}
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -353,13 +306,164 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
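
After the conversion a caller describes local fragments with plain ib_sge entries and gets 0 or -errno back. A hedged sketch of posting a two-fragment RDMA write, using only fields visible in this diff; the IRDMA_OP_TYPE_RDMA_WRITE name is assumed from the driver's op-type enum:

    /* Sketch only: placeholder values, no error handling. */
    static int post_write(struct irdma_qp_uk *qp, struct ib_sge *frags,
    		      u64 raddr, u32 rkey, u64 wr_id)
    {
    	struct irdma_post_sq_info info = {};

    	info.wr_id = wr_id;
    	info.signaled = true;
    	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;	/* assumed enum name */
    	info.op.rdma_write.lo_sg_list = frags;		/* local fragments  */
    	info.op.rdma_write.num_lo_sges = 2;
    	info.op.rdma_write.rem_addr.addr = raddr;	/* remote ib_sge    */
    	info.op.rdma_write.rem_addr.lkey = rkey;

    	return irdma_uk_rdma_write(qp, &info, true);	/* 0 or -errno */
    }
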
+/**
+ * irdma_uk_atomic_fetch_add - atomic fetch and add operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_fetch_add *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_fetch_add;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_atomic_compare_swap - atomic compare and swap operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_compare_swap *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_compare_swap;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->swap_data_bytes);
+ set_64bit_val(wqe, 40, op_info->compare_data_bytes);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
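
Both atomic verbs build the same two-quantum WQE; only the payload qwords differ (the add operand at offset 32 for fetch-add, the swap value at 32 plus the compare value at 40 for compare-swap). A hedged caller sketch for 64-bit fetch-and-add, using the op_info fields shown above:

    /* Sketch only: placeholder values, no error handling. */
    static int post_fetch_add(struct irdma_qp_uk *qp, u64 local_va, u32 lkey,
    			  u64 remote_va, u32 rkey, u64 add)
    {
    	struct irdma_post_sq_info info = {};
    	struct irdma_atomic_fetch_add *op = &info.op.atomic_fetch_add;

    	info.wr_id = 1;
    	info.signaled = true;
    	op->tagged_offset = local_va;		/* old value lands here */
    	op->stag = lkey;
    	op->remote_tagged_offset = remote_va;	/* counter to update    */
    	op->remote_stag = rkey;
    	op->fetch_add_data_bytes = add;		/* operand (offset 32)  */

    	return irdma_uk_atomic_fetch_add(qp, &info, true);
    }
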
+/**
+ * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
+ * @srq: shared rq ptr
+ * @info: post rq information
+ */
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (srq->max_srq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
+ srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ srq->srwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ srq->srwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if even number of sges, set valid bit in the next fragment */
+ if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ srq->srwqe_polarity);
+ if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
}
+ set_64bit_val(wqe, 16, (u64)info->wr_id);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
+
return 0;
}
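
Note the tail handling: instead of a per-WQE doorbell, the function publishes the next free index, (wqe_idx + 1) % ring size, to the shadow area after the valid bit is set. A hedged sketch of posting one buffer, using only the irdma_post_rq_info fields visible above:

    /* Sketch only: placeholder values. */
    static int post_srq_recv(struct irdma_srq_uk *srq, u64 buf, u32 len,
    			 u32 lkey, u64 wr_id)
    {
    	struct ib_sge sge = { .addr = buf, .length = len, .lkey = lkey };
    	struct irdma_post_rq_info info = {};

    	info.wr_id = wr_id;
    	info.sg_list = &sge;
    	info.num_sges = 1;

    	return irdma_uk_srq_post_receive(srq, &info);	/* 0 or -errno */
    }
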
@@ -370,12 +474,11 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
* @inv_stag: flag for inv_stag
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool inv_stag, bool post_sq)
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
{
struct irdma_rdma_read *op_info;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 i, byte_off, total_size = 0;
bool local_fence = false;
u32 addl_frag_cnt;
@@ -384,14 +487,12 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
u16 quanta;
u64 hdr;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
if (ret_code)
@@ -400,7 +501,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -426,13 +527,12 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
++addl_frag_cnt;
}
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
(inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -441,12 +541,9 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -457,27 +554,24 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_post_send *op_info;
u64 hdr;
u32 i, wqe_idx, total_size = 0, byte_off;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 frag_cnt, addl_frag_cnt;
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (info->imm_data_valid)
frag_cnt = op_info->num_sges + 1;
@@ -490,7 +584,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -501,7 +595,8 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -531,7 +626,6 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -542,12 +636,9 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -569,21 +660,37 @@ static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
/**
* irdma_copy_inline_data_gen_1 - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of sges
* @polarity: compatibility parameter
*/
-static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
- u8 polarity)
+static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
- if (len <= 16) {
- memcpy(dest, src, len);
- } else {
- memcpy(dest, src, 16);
- src += 16;
- dest = dest + 32;
- memcpy(dest, src, len - 16);
+ u32 quanta_bytes_remaining = 16;
+ int i;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ /* Remaining inline bytes reside after hdr */
+ wqe += 16;
+ quanta_bytes_remaining = 32;
+ }
+ }
}
}
@@ -615,35 +722,51 @@ static void irdma_set_mw_bind_wqe(__le64 *wqe,
/**
* irdma_copy_inline_data - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of sges
* @polarity: polarity of wqe valid bit
*/
-static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
+static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
- u32 copy_size;
-
- dest += 8;
- if (len <= 8) {
- memcpy(dest, src, len);
- return;
- }
-
- *((u64 *)dest) = *((u64 *)src);
- len -= 8;
- src += 8;
- dest += 24; /* point to additional 32 byte quanta */
-
- while (len) {
- copy_size = len < 31 ? len : 31;
- memcpy(dest, src, copy_size);
- *(dest + 31) = inline_valid;
- len -= copy_size;
- dest += 32;
- src += copy_size;
+ u32 quanta_bytes_remaining = 8;
+ bool first_quanta = true;
+ int i;
+
+ wqe += 8;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ quanta_bytes_remaining = 31;
+
+ /* Remaining inline bytes reside after hdr */
+ if (first_quanta) {
+ first_quanta = false;
+ wqe += 16;
+ } else {
+ *wqe = inline_valid;
+ wqe++;
+ }
+ }
+ }
}
+ if (!first_quanta && quanta_bytes_remaining < 31)
+ *(wqe + quanta_bytes_remaining) = inline_valid;
}
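
The gen-2 copier threads inline bytes around the WQE metadata: 8 data bytes follow the 8-byte header in the first quantum, then a 16-byte skip over the rest of the header, then 31 data bytes per subsequent 32-byte quantum with the final byte of each reserved for the inline-valid flag. A runnable simulation of where each source byte lands, assuming that layout:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int off = 8;		/* quantum 0: bytes 0-7 are header */
    	unsigned int remaining = 8;	/* data room left in this quantum  */

    	for (unsigned int i = 0; i < 48; i++) {
    		printf("inline byte %2u -> wqe offset %3u\n", i, off);
    		off++;
    		if (--remaining == 0) {
    			if (off == 16)
    				off += 16;	/* skip rest of header    */
    			else
    				off++;		/* skip inline-valid byte */
    			remaining = 31;
    		}
    	}
    	return 0;
    }
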
/**
@@ -678,42 +801,46 @@ static u16 irdma_inline_data_size_to_quanta(u32 data_size)
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code
-irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_inline_rdma_write *op_info;
+ struct irdma_rdma_write *op_info;
u64 hdr = 0;
u32 wqe_idx;
bool read_fence = false;
+ u32 i, total_size = 0;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_rdma_write;
+ op_info = &info->op.rdma_write;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
- if (op_info->len > qp->max_inline_data)
- return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
read_fence |= info->read_fence;
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
- FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -723,18 +850,15 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
set_64bit_val(wqe, 0,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
+ op_info->num_lo_sges,
qp->swqe_polarity);
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -745,28 +869,33 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_post_inline_send *op_info;
+ struct irdma_post_send *op_info;
u64 hdr;
u32 wqe_idx;
bool read_fence = false;
+ u32 i, total_size = 0;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_send;
+ op_info = &info->op.send;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].length;
- if (op_info->len > qp->max_inline_data)
- return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -778,12 +907,11 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
- FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
(info->imm_data_valid ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -794,19 +922,15 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
if (info->imm_data_valid)
set_64bit_val(wqe, 0,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
- qp->swqe_polarity);
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
+ op_info->num_sges, qp->swqe_polarity);
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -817,35 +941,33 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code
-irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info, bool post_sq)
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_inv_local_stag *op_info;
u64 hdr;
u32 wqe_idx;
bool local_fence = false;
- struct irdma_sge sge = {};
+ struct ib_sge sge = {};
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
local_fence = info->local_fence;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
- sge.stag = op_info->target_stag;
+ sge.lkey = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
set_64bit_val(wqe, 16, 0);
hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -855,70 +977,8 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
-
- return 0;
-}
-
-/**
- * irdma_uk_mw_bind - bind Memory Window
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
-{
- __le64 *wqe;
- struct irdma_bind_window *op_info;
- u64 hdr;
- u32 wqe_idx;
- bool local_fence = false;
-
- info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.bind_window;
- local_fence |= info->local_fence;
-
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
- if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
-
- irdma_clr_wqes(qp, wqe_idx);
-
- qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
- FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
- ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
- FIELD_PREP(IRDMAQPSQ_VABASEDTO,
- (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
- (op_info->mem_window_type_1 ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
- FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
- FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
- FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
-
- dma_wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -928,23 +988,20 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
* @qp: hw qp ptr
* @info: post rq information
*/
-enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
- struct irdma_post_rq_info *info)
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
{
- u32 total_size = 0, wqe_idx, i, byte_off;
+ u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
__le64 *wqe;
u64 hdr;
if (qp->max_rq_frag_cnt < info->num_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
-
- for (i = 0; i < info->num_sges; i++)
- total_size += info->sg_list[i].len;
+ return -EINVAL;
wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
qp->rq_wrid_array[wqe_idx] = info->wr_id;
addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
@@ -1056,19 +1113,43 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
}
/**
+ * irdma_uk_cq_empty - Check if CQ is empty
+ * @cq: hw cq
+ */
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u8 polarity;
+ u64 qword3;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != cq->polarity;
+}
+
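
The emptiness test relies on the valid-bit polarity scheme: hardware inverts the polarity it writes on each lap of the CQ ring, so a CQE whose valid bit does not match the consumer's expected polarity was written on an earlier lap. A toy runnable model of the check (note cq->polarity is initialized to 1 in irdma_uk_cq_init below):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
    	bool cqe_valid[4] = { 1, 0, 0, 0 };	/* HW produced one CQE */
    	unsigned int head = 0;
    	bool polarity = 1;			/* consumer expects 1  */

    	for (int i = 0; i < 3; i++) {
    		bool empty = cqe_valid[head] != polarity;

    		printf("slot %u: %s\n", head, empty ? "empty" : "cqe ready");
    		if (empty)
    			break;
    		head = (head + 1) % 4;
    		if (!head)
    			polarity = !polarity;	/* flip on wrap */
    	}
    	return 0;
    }
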
+/**
* irdma_uk_cq_poll_cmpl - get cq completion info
* @cq: hw cq
* @info: cq poll information returned
*/
-enum irdma_status_code
-irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info)
{
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
+ struct irdma_srq_uk *srq;
+ struct qp_err_code qp_err;
+ u8 is_srq;
struct irdma_ring *pring = NULL;
- u32 wqe_idx, q_type;
- enum irdma_status_code ret_code;
+ u32 wqe_idx;
+ int ret_code;
bool move_cq_head = true;
u8 polarity;
bool ext_valid;
@@ -1082,7 +1163,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
get_64bit_val(cqe, 24, &qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != cq->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
/* Ensure CQE contents are read after valid bit is checked */
dma_rmb();
@@ -1095,17 +1176,17 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *)((u8 *)cqe + 32);
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}
if (polarity != cq->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
/* Ensure ext CQE contents are read after ext valid bit is checked */
dma_rmb();
@@ -1138,23 +1219,47 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
info->ud_vlan_valid = false;
}
- q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
- info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if (is_srq)
+ get_64bit_val(cqe, 40, (u64 *)&qp);
+ else
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
- if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
- info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ switch (info->major_err) {
+ case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
+ qp_err = irdma_ae_to_qp_err_code(info->minor_err);
+ info->minor_err = qp_err.flush_code;
+ fallthrough;
+ case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
qword3 &= ~IRDMA_CQ_MINERR;
qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, 24, qword3);
}
- } else {
- info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
+#define IRDMA_CIE_SIGNATURE 0xE
+#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
+ if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
+ qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
+ FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
+ == IRDMA_CIE_SIGNATURE) {
+ info->error = 0;
+ info->major_err = 0;
+ info->minor_err = 0;
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1163,7 +1268,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
@@ -1172,13 +1276,34 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (!qp || qp->destroy_pending) {
- ret_code = IRDMA_ERR_Q_DESTROYED;
+ ret_code = -EFAULT;
goto exit;
}
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
+ unsigned long flags;
+
+ srq = qp->srq_uk;
- if (q_type == IRDMA_CQE_QTYPE_RQ) {
+ get_64bit_val(cqe, 8, &info->wr_id);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
+ qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ spin_lock_irqsave(srq->lock, flags);
+ IRDMA_RING_MOVE_TAIL(srq->srq_ring);
+ spin_unlock_irqrestore(srq->lock, flags);
+ pring = &srq->srq_ring;
+
+ } else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
u32 array_idx;
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
@@ -1186,7 +1311,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
- ret_code = IRDMA_ERR_Q_EMPTY;
+ ret_code = -ENOENT;
goto exit;
}
@@ -1198,10 +1323,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
- if (info->imm_valid)
- info->op_type = IRDMA_OP_TYPE_REC_IMM;
- else
- info->op_type = IRDMA_OP_TYPE_REC;
if (qword3 & IRDMACQ_STAG) {
info->stag_invalid_set = true;
info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
@@ -1232,44 +1353,45 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
return irdma_uk_cq_poll_cmpl(cq, info);
}
}
- /*cease posting push mode on push drop*/
- if (info->push_dropped) {
- qp->push_mode = false;
- qp->push_dropped = true;
- }
if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
if (!info->comp_status)
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+ if (!qp->sq_wrtrk_array[wqe_idx].signaled) {
+ ret_code = -EFAULT;
+ goto exit;
+ }
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
- ret_code = IRDMA_ERR_Q_EMPTY;
+ ret_code = -ENOENT;
goto exit;
}
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
sw_wqe = qp->sq_base[tail].elem;
get_64bit_val(sw_wqe, 24,
&wqe_qword);
- op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
- info->op_type = op_type;
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
+ wqe_qword);
IRDMA_RING_SET_TAIL(qp->sq_ring,
tail + qp->sq_wrtrk_array[tail].quanta);
- if (op_type != IRDMAQP_OP_NOP) {
+ if (info->op_type != IRDMAQP_OP_NOP) {
info->wr_id = qp->sq_wrtrk_array[tail].wrid;
info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
break;
}
} while (1);
+ if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
+ info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
@@ -1280,9 +1402,15 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
ret_code = 0;
exit:
- if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
- move_cq_head = false;
+ /* Park CQ head during a flush to generate additional CQEs
+ * from SW for all unprocessed WQEs. For GEN3 and beyond
+ * FW will generate/flush these CQEs so move to the next CQE
+ */
+ move_cq_head = qp->uk_attrs->hw_rev <= IRDMA_GEN_2 ?
+ false : true;
+ }
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1298,8 +1426,9 @@ exit:
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
if (!cq->avoid_mem_cflct && ext_valid)
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, 0,
- IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
} else {
qword3 &= ~IRDMA_CQ_WQEIDX;
qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
@@ -1310,10 +1439,10 @@ exit:
}
/**
- * irdma_qp_round_up - return round up qp wq depth
+ * irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
-static int irdma_qp_round_up(u32 wqdepth)
+static int irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;
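
The rename leaves the behaviour intact: the scount loop rounds a requested quanta count up to the next power of two. An equivalent runnable sketch, assuming the loop implements the usual bit-smear idiom:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t round_up_pow2(uint32_t depth)
    {
    	depth--;			/* keep exact powers of two as-is */
    	for (unsigned int s = 1; s <= 16; s <<= 1)
    		depth |= depth >> s;	/* smear the top set bit downward */
    	return depth + 1;
    }

    int main(void)
    {
    	/* prints: 128 128 256 */
    	printf("%u %u %u\n", round_up_pow2(100), round_up_pow2(128),
    	       round_up_pow2(129));
    	return 0;
    }
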
@@ -1363,15 +1492,17 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
* @sqdepth: depth of SQ
*
*/
-enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 sq_size, u8 shift, u32 *sqdepth)
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *sqdepth)
{
- *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
+
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -1383,15 +1514,37 @@ enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
* @shift: shift which determines size of WQE
* @rqdepth: depth of RQ
*/
-enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 rq_size, u8 shift, u32 *rqdepth)
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *rqdepth)
{
- *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * irdma_get_srqdepth - get SRQ depth (quanta)
+ * @uk_attrs: srq HW attributes
+ * @srq_size: SRQ size
+ * @shift: shift which determines size of WQE
+ * @srqdepth: depth of SRQ
+ */
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth)
+{
+ *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *srqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+ return -EINVAL;
return 0;
}
@@ -1428,7 +1581,114 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
qp->conn_wqes = move_cnt;
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
- IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+}
+
+/**
+ * irdma_uk_srq_init - initialize shared receive queue
+ * @srq: hw srq (user and kernel)
+ * @info: srq initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The size of the wqe depends on number of max fragments
+ * allowed. Then size of wqe * the number of wqes should be the
+ * amount of memory allocated for srq.
+ */
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info)
+{
+ u8 rqshift;
+
+ srq->uk_attrs = info->uk_attrs;
+ if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
+ srq->srq_caps = info->srq_caps;
+ srq->srq_base = info->srq;
+ srq->shadow_area = info->shadow_area;
+ srq->srq_id = info->srq_id;
+ srq->srwqe_polarity = 0;
+ srq->srq_size = info->srq_size;
+ srq->wqe_size = rqshift;
+ srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
+ ((u32)2 << rqshift) - 1);
+ IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
+ srq->wqe_size_multiplier = 1 << rqshift;
+ srq->wqe_ops = iw_wqe_uk_ops;
+
+ return 0;
+}
+
+/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
}
/**
@@ -1441,28 +1701,16 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
* allowed. Then size of wqe * the number of wqes should be the
* amount of memory allocated for sq and rq.
*/
-enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
- struct irdma_qp_uk_init_info *info)
+int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 sq_ring_size;
- u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
-
- irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
- info->max_inline_data, &sqshift);
- if (info->abi_ver > 4)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- } else {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
- info->max_inline_data, &sqshift);
- }
+ return -EINVAL;
+
qp->qp_caps = info->qp_caps;
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -1474,11 +1722,9 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
qp->wqe_alloc_db = info->wqe_alloc_db;
qp->qp_id = info->qp_id;
qp->sq_size = info->sq_size;
- qp->push_mode = false;
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
- sq_ring_size = qp->sq_size << sqshift;
+ sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
- IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
irdma_setup_connection_wqes(qp, info);
qp->swqe_polarity = 1;
@@ -1491,13 +1737,14 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
qp->max_inline_data = info->max_inline_data;
- qp->rq_wqe_size = rqshift;
+ qp->rq_wqe_size = info->rq_shift;
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
- qp->rq_wqe_size_multiplier = 1 << rqshift;
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
+ qp->srq_uk = info->srq_uk;
return ret_code;
}
@@ -1506,8 +1753,8 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
* @cq: hw cq
* @info: hw cq initialization info
*/
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info)
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
{
cq->cq_base = info->cq_base;
cq->cq_id = info->cq_id;
@@ -1518,8 +1765,6 @@ enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
cq->avoid_mem_cflct = info->avoid_mem_cflct;
IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
cq->polarity = 1;
-
- return 0;
}
/**
@@ -1547,6 +1792,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
if (polarity != temp)
break;
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
get_64bit_val(cqe, 8, &comp_ctx);
if ((void *)(unsigned long)comp_ctx == q)
set_64bit_val(cqe, 8, 0);
@@ -1564,20 +1812,18 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
* @signaled: signaled for completion
* @post_sq: ring doorbell
*/
-enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
- bool signaled, bool post_sq)
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
__le64 *wqe;
u64 hdr;
u32 wqe_idx;
struct irdma_post_sq_info info = {};
- info.push_wqe = false;
info.wr_id = wr_id;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, &info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -1603,7 +1849,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
* @frag_cnt: number of fragments
* @quanta: quanta for frag_cnt
*/
-enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
switch (frag_cnt) {
case 0:
@@ -1639,7 +1885,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
*quanta = 8;
break;
default:
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
}
return 0;
@@ -1650,7 +1896,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
* @frag_cnt: number of fragments
* @wqe_size: size in bytes given frag_cnt
*/
-enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
switch (frag_cnt) {
case 0:
@@ -1677,7 +1923,7 @@ enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
*wqe_size = 256;
break;
default:
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
}
return 0;