Diffstat (limited to 'drivers/infiniband/hw/irdma/uk.c')
-rw-r--r--   drivers/infiniband/hw/irdma/uk.c   601
1 file changed, 435 insertions(+), 166 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index 16183e894da7..f0846b800913 100644 --- a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "defs.h" @@ -93,16 +93,18 @@ static int irdma_nop_1(struct irdma_qp_uk *qp) */ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) { - __le64 *wqe; + struct irdma_qp_quanta *sq; u32 wqe_idx; if (!(qp_wqe_idx & 0x7F)) { wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; - wqe = qp->sq_base[wqe_idx].elem; + sq = qp->sq_base + wqe_idx; if (wqe_idx) - memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000); + memset(sq, qp->swqe_polarity ? 0 : 0xFF, + 128 * sizeof(*sq)); else - memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000); + memset(sq, qp->swqe_polarity ? 0xFF : 0, + 128 * sizeof(*sq)); } } @@ -112,68 +114,8 @@ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) */ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp) { - u64 temp; - u32 hw_sq_tail; - u32 sw_sq_head; - - /* valid bit is written and loads completed before reading shadow */ - mb(); - - /* read the doorbell shadow area */ - get_64bit_val(qp->shadow_area, 0, &temp); - - hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp); - sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); - if (sw_sq_head != qp->initial_ring.head) { - if (qp->push_dropped) { - writel(qp->qp_id, qp->wqe_alloc_db); - qp->push_dropped = false; - } else if (sw_sq_head != hw_sq_tail) { - if (sw_sq_head > qp->initial_ring.head) { - if (hw_sq_tail >= qp->initial_ring.head && - hw_sq_tail < sw_sq_head) - writel(qp->qp_id, qp->wqe_alloc_db); - } else { - if (hw_sq_tail >= qp->initial_ring.head || - hw_sq_tail < sw_sq_head) - writel(qp->qp_id, qp->wqe_alloc_db); - } - } - } - - qp->initial_ring.head = qp->sq_ring.head; -} - -/** - * irdma_qp_ring_push_db - ring qp doorbell - * @qp: hw qp ptr - * @wqe_idx: wqe index - */ -static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx) -{ - set_32bit_val(qp->push_db, 0, - FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id); - qp->initial_ring.head = qp->sq_ring.head; - qp->push_mode = true; - qp->push_dropped = false; -} - -void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta, - u32 wqe_idx, bool post_sq) -{ - __le64 *push; - - if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) != - IRDMA_RING_CURRENT_TAIL(qp->sq_ring) && - !qp->push_mode) { - if (post_sq) - irdma_uk_qp_post_wr(qp); - } else { - push = (__le64 *)((uintptr_t)qp->push_wqe + - (wqe_idx & 0x7) * 0x20); - memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE); - irdma_qp_ring_push_db(qp, wqe_idx); - } + dma_wmb(); + writel(qp->qp_id, qp->wqe_alloc_db); } /** @@ -190,7 +132,6 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, { __le64 *wqe; __le64 *wqe_0 = NULL; - u32 nop_wqe_idx; u16 avail_quanta; u16 i; @@ -207,14 +148,10 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring)) return NULL; - nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); for (i = 0; i < avail_quanta; i++) { irdma_nop_1(qp); IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring); } - if (qp->push_db && info->push_wqe) - irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem, - avail_quanta, nop_wqe_idx, true); } *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); @@ -232,6 +169,27 @@ 
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id; qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size; qp->sq_wrtrk_array[*wqe_idx].quanta = quanta; + qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled; + + return wqe; +} + +__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx) +{ + int ret_code; + __le64 *wqe; + + if (IRDMA_RING_FULL_ERR(srq->srq_ring)) + return NULL; + + IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + + if (!*wqe_idx) + srq->srwqe_polarity = !srq->srwqe_polarity; + /* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */ + wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem; return wqe; } @@ -280,8 +238,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool read_fence = false; u16 quanta; - info->push_wqe = qp->push_db ? true : false; - op_info = &info->op.rdma_write; if (op_info->num_lo_sges > qp->max_sq_frag_cnt) return -EINVAL; @@ -342,7 +298,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) | FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | - FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | @@ -351,13 +306,164 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); - if (info->push_wqe) { - irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq); - } else { - if (post_sq) - irdma_uk_qp_post_wr(qp); + + if (post_sq) + irdma_uk_qp_post_wr(qp); + + return 0; +} + +/** + * irdma_uk_atomic_fetch_add - atomic fetch and add operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq) +{ + struct irdma_atomic_fetch_add *op_info; + u32 total_size = 0; + u16 quanta = 2; + u32 wqe_idx; + __le64 *wqe; + u64 hdr; + + op_info = &info->op.atomic_fetch_add; + wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, + info); + if (!wqe) + return -ENOMEM; + + set_64bit_val(wqe, 0, op_info->tagged_offset); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag)); + set_64bit_val(wqe, 16, op_info->remote_tagged_offset); + + hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) | + FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) | + FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes); + set_64bit_val(wqe, 40, 0); + set_64bit_val(wqe, 48, 0); + set_64bit_val(wqe, 56, + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + if (post_sq) + irdma_uk_qp_post_wr(qp); + + return 0; +} + +/** + * irdma_uk_atomic_compare_swap - atomic compare and swap operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int 
irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq) +{ + struct irdma_atomic_compare_swap *op_info; + u32 total_size = 0; + u16 quanta = 2; + u32 wqe_idx; + __le64 *wqe; + u64 hdr; + + op_info = &info->op.atomic_compare_swap; + wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, + info); + if (!wqe) + return -ENOMEM; + + set_64bit_val(wqe, 0, op_info->tagged_offset); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag)); + set_64bit_val(wqe, 16, op_info->remote_tagged_offset); + + hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) | + FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) | + FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + set_64bit_val(wqe, 32, op_info->swap_data_bytes); + set_64bit_val(wqe, 40, op_info->compare_data_bytes); + set_64bit_val(wqe, 48, 0); + set_64bit_val(wqe, 56, + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + if (post_sq) + irdma_uk_qp_post_wr(qp); + + return 0; +} + +/** + * irdma_uk_srq_post_receive - post a receive wqe to a shared rq + * @srq: shared rq ptr + * @info: post rq information + */ +int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq, + struct irdma_post_rq_info *info) +{ + u32 wqe_idx, i, byte_off; + u32 addl_frag_cnt; + __le64 *wqe; + u64 hdr; + + if (srq->max_srq_frag_cnt < info->num_sges) + return -EINVAL; + + wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx); + if (!wqe) + return -ENOMEM; + + addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0; + srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list, + srq->srwqe_polarity); + + for (i = 1, byte_off = 32; i < info->num_sges; i++) { + srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i], + srq->srwqe_polarity); + byte_off += 16; } + /* if not an odd number set valid bit in next fragment */ + if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) && + info->num_sges) { + srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + srq->srwqe_polarity); + if (srq->uk_attrs->hw_rev == IRDMA_GEN_2) + ++addl_frag_cnt; + } + + set_64bit_val(wqe, 16, (u64)info->wr_id); + hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size); + return 0; } @@ -381,8 +487,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, u16 quanta; u64 hdr; - info->push_wqe = qp->push_db ? true : false; - op_info = &info->op.rdma_read; if (qp->max_sq_frag_cnt < op_info->num_lo_sges) return -EINVAL; @@ -429,7 +533,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | FIELD_PREP(IRDMAQPSQ_OPCODE, (inv_stag ? 
IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) | - FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) | FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | @@ -438,12 +541,9 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); - if (info->push_wqe) { - irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq); - } else { - if (post_sq) - irdma_uk_qp_post_wr(qp); - } + + if (post_sq) + irdma_uk_qp_post_wr(qp); return 0; } @@ -466,8 +566,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool read_fence = false; u16 quanta; - info->push_wqe = qp->push_db ? true : false; - op_info = &info->op.send; if (qp->max_sq_frag_cnt < op_info->num_sges) return -EINVAL; @@ -528,7 +626,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | - FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | @@ -539,12 +636,9 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); - if (info->push_wqe) { - irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq); - } else { - if (post_sq) - irdma_uk_qp_post_wr(qp); - } + + if (post_sq) + irdma_uk_qp_post_wr(qp); return 0; } @@ -718,7 +812,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, u32 i, total_size = 0; u16 quanta; - info->push_wqe = qp->push_db ? true : false; op_info = &info->op.rdma_write; if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges)) @@ -748,7 +841,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) | - FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | @@ -765,12 +857,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, set_64bit_val(wqe, 24, hdr); - if (info->push_wqe) { - irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq); - } else { - if (post_sq) - irdma_uk_qp_post_wr(qp); - } + if (post_sq) + irdma_uk_qp_post_wr(qp); return 0; } @@ -792,7 +880,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp, u32 i, total_size = 0; u16 quanta; - info->push_wqe = qp->push_db ? true : false; op_info = &info->op.send; if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges)) @@ -825,7 +912,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp, (info->imm_data_valid ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 
1 : 0)) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | - FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | @@ -843,12 +929,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp, set_64bit_val(wqe, 24, hdr); - if (info->push_wqe) { - irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq); - } else { - if (post_sq) - irdma_uk_qp_post_wr(qp); - } + if (post_sq) + irdma_uk_qp_post_wr(qp); return 0; } @@ -870,7 +952,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, bool local_fence = false; struct ib_sge sge = {}; - info->push_wqe = qp->push_db ? true : false; op_info = &info->op.inv_local_stag; local_fence = info->local_fence; @@ -887,7 +968,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) | - FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) | FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | @@ -897,13 +977,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, set_64bit_val(wqe, 24, hdr); - if (info->push_wqe) { - irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx, - post_sq); - } else { - if (post_sq) - irdma_uk_qp_post_wr(qp); - } + if (post_sq) + irdma_uk_qp_post_wr(qp); return 0; } @@ -1038,6 +1113,27 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, } /** + * irdma_uk_cq_empty - Check if CQ is empty + * @cq: hw cq + */ +bool irdma_uk_cq_empty(struct irdma_cq_uk *cq) +{ + __le64 *cqe; + u8 polarity; + u64 qword3; + + if (cq->avoid_mem_cflct) + cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq); + else + cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq); + + get_64bit_val(cqe, 24, &qword3); + polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); + + return polarity != cq->polarity; +} + +/** * irdma_uk_cq_poll_cmpl - get cq completion info * @cq: hw cq * @info: cq poll information returned @@ -1048,6 +1144,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, u64 comp_ctx, qword0, qword2, qword3; __le64 *cqe; struct irdma_qp_uk *qp; + struct irdma_srq_uk *srq; + struct qp_err_code qp_err; + u8 is_srq; struct irdma_ring *pring = NULL; u32 wqe_idx; int ret_code; @@ -1121,22 +1220,46 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, } info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); + is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3); info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3); - info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3); info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3); + get_64bit_val(cqe, 8, &comp_ctx); + if (is_srq) + get_64bit_val(cqe, 40, (u64 *)&qp); + else + qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx; if (info->error) { info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3); info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3); - if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) { - info->comp_status = IRDMA_COMPL_STATUS_FLUSHED; + switch (info->major_err) { + case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR: + qp_err = irdma_ae_to_qp_err_code(info->minor_err); + info->minor_err = qp_err.flush_code; + fallthrough; + case IRDMA_FLUSH_MAJOR_ERR: /* Set the min error to standard flush error code for remaining cqes */ if (info->minor_err != FLUSH_GENERAL_ERR) { qword3 &= ~IRDMA_CQ_MINERR; qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR); set_64bit_val(cqe, 24, qword3); } - } else { - info->comp_status = 
IRDMA_COMPL_STATUS_UNKNOWN; + info->comp_status = IRDMA_COMPL_STATUS_FLUSHED; + break; + default: +#define IRDMA_CIE_SIGNATURE 0xE +#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12) + if (info->q_type == IRDMA_CQE_QTYPE_SQ && + qp->qp_type == IRDMA_QP_TYPE_ROCE_UD && + FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err) + == IRDMA_CIE_SIGNATURE) { + info->error = 0; + info->major_err = 0; + info->minor_err = 0; + info->comp_status = IRDMA_COMPL_STATUS_SUCCESS; + } else { + info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN; + } + break; } } else { info->comp_status = IRDMA_COMPL_STATUS_SUCCESS; @@ -1145,7 +1268,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); - info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0); info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2); info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2); @@ -1159,9 +1281,29 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, } wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); info->qp_handle = (irdma_qp_handle)(unsigned long)qp; - info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); + info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); + + if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) { + unsigned long flags; + + srq = qp->srq_uk; + + get_64bit_val(cqe, 8, &info->wr_id); + info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0); + + if (qword3 & IRDMACQ_STAG) { + info->stag_invalid_set = true; + info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, + qword2); + } else { + info->stag_invalid_set = false; + } + spin_lock_irqsave(srq->lock, flags); + IRDMA_RING_MOVE_TAIL(srq->srq_ring); + spin_unlock_irqrestore(srq->lock, flags); + pring = &srq->srq_ring; - if (info->q_type == IRDMA_CQE_QTYPE_RQ) { + } else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) { u32 array_idx; array_idx = wqe_idx / qp->rq_wqe_size_multiplier; @@ -1211,15 +1353,14 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, return irdma_uk_cq_poll_cmpl(cq, info); } } - /*cease posting push mode on push drop*/ - if (info->push_dropped) { - qp->push_mode = false; - qp->push_dropped = true; - } if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) { info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; if (!info->comp_status) info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len; + if (!qp->sq_wrtrk_array[wqe_idx].signaled) { + ret_code = -EFAULT; + goto exit; + } info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); IRDMA_RING_SET_TAIL(qp->sq_ring, wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); @@ -1261,9 +1402,15 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, ret_code = 0; exit: - if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) + if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) { if (pring && IRDMA_RING_MORE_WORK(*pring)) - move_cq_head = false; + /* Park CQ head during a flush to generate additional CQEs + * from SW for all unprocessed WQEs. For GEN3 and beyond + * FW will generate/flush these CQEs so move to the next CQE + */ + move_cq_head = qp->uk_attrs->hw_rev <= IRDMA_GEN_2 ? 
+ false : true; + } if (move_cq_head) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); @@ -1279,8 +1426,9 @@ exit: IRDMA_RING_MOVE_TAIL(cq->cq_ring); if (!cq->avoid_mem_cflct && ext_valid) IRDMA_RING_MOVE_TAIL(cq->cq_ring); - set_64bit_val(cq->shadow_area, 0, - IRDMA_RING_CURRENT_HEAD(cq->cq_ring)); + if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq)) + set_64bit_val(cq->shadow_area, 0, + IRDMA_RING_CURRENT_HEAD(cq->cq_ring)); } else { qword3 &= ~IRDMA_CQ_WQEIDX; qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail); @@ -1291,10 +1439,10 @@ exit: } /** - * irdma_qp_round_up - return round up qp wq depth + * irdma_round_up_wq - return round up qp wq depth * @wqdepth: wq depth in quanta to round up */ -static int irdma_qp_round_up(u32 wqdepth) +static int irdma_round_up_wq(u32 wqdepth) { int scount = 1; @@ -1347,10 +1495,12 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth) { - *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD); + u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift; + + *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD); - if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift)) - *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift; + if (*sqdepth < min_size) + *sqdepth = min_size; else if (*sqdepth > uk_attrs->max_hw_wq_quanta) return -EINVAL; @@ -1367,16 +1517,38 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth) { - *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD); + u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift; - if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift)) - *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift; + *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD); + + if (*rqdepth < min_size) + *rqdepth = min_size; else if (*rqdepth > uk_attrs->max_hw_rq_quanta) return -EINVAL; return 0; } +/* + * irdma_get_srqdepth - get SRQ depth (quanta) + * @uk_attrs: qp HW attributes + * @srq_size: SRQ size + * @shift: shift which determines size of WQE + * @srqdepth: depth of SRQ + */ +int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, + u32 *srqdepth) +{ + *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD); + + if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift)) + *srqdepth = uk_attrs->min_hw_wq_size << shift; + else if (*srqdepth > uk_attrs->max_hw_srq_quanta) + return -EINVAL; + + return 0; +} + static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = { .iw_copy_inline_data = irdma_copy_inline_data, .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta, @@ -1409,7 +1581,114 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp, qp->conn_wqes = move_cnt; IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt); IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt); - IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt); +} + +/** + * irdma_uk_srq_init - initialize shared qp + * @srq: hw srq (user and kernel) + * @info: srq initialization info + * + * Initializes the vars used in both user and kernel mode. + * The size of the wqe depends on number of max fragments + * allowed. Then size of wqe * the number of wqes should be the + * amount of memory allocated for srq. 
+ */ +int irdma_uk_srq_init(struct irdma_srq_uk *srq, + struct irdma_srq_uk_init_info *info) +{ + u8 rqshift; + + srq->uk_attrs = info->uk_attrs; + if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags) + return -EINVAL; + + irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift); + srq->srq_caps = info->srq_caps; + srq->srq_base = info->srq; + srq->shadow_area = info->shadow_area; + srq->srq_id = info->srq_id; + srq->srwqe_polarity = 0; + srq->srq_size = info->srq_size; + srq->wqe_size = rqshift; + srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags, + ((u32)2 << rqshift) - 1); + IRDMA_RING_INIT(srq->srq_ring, srq->srq_size); + srq->wqe_size_multiplier = 1 << rqshift; + srq->wqe_ops = iw_wqe_uk_ops; + + return 0; +} + +/** + * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ + * @ukinfo: qp initialization info + * @sq_shift: Returns shift of SQ + * @rq_shift: Returns shift of RQ + */ +void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift, + u8 *rq_shift) +{ + bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; + + irdma_get_wqe_shift(ukinfo->uk_attrs, + imm_support ? ukinfo->max_sq_frag_cnt + 1 : + ukinfo->max_sq_frag_cnt, + ukinfo->max_inline_data, sq_shift); + + irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, + rq_shift); + + if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { + if (ukinfo->abi_ver > 4) + *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; + } +} + +/** + * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size. + * @ukinfo: qp initialization info + * @sq_depth: Returns depth of SQ + * @sq_shift: Returns shift of SQ + */ +int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo, + u32 *sq_depth, u8 *sq_shift) +{ + bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; + int status; + + irdma_get_wqe_shift(ukinfo->uk_attrs, + imm_support ? ukinfo->max_sq_frag_cnt + 1 : + ukinfo->max_sq_frag_cnt, + ukinfo->max_inline_data, sq_shift); + status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size, + *sq_shift, sq_depth); + + return status; +} + +/** + * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size. 
+ * @ukinfo: qp initialization info + * @rq_depth: Returns depth of RQ + * @rq_shift: Returns shift of RQ + */ +int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo, + u32 *rq_depth, u8 *rq_shift) +{ + int status; + + irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, + rq_shift); + + if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { + if (ukinfo->abi_ver > 4) + *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; + } + + status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size, + *rq_shift, rq_depth); + + return status; } /** @@ -1426,23 +1705,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { int ret_code = 0; u32 sq_ring_size; - u8 sqshift, rqshift; qp->uk_attrs = info->uk_attrs; if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags || info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags) return -EINVAL; - irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift); - if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) { - irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt, - info->max_inline_data, &sqshift); - if (info->abi_ver > 4) - rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; - } else { - irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1, - info->max_inline_data, &sqshift); - } qp->qp_caps = info->qp_caps; qp->sq_base = info->sq; qp->rq_base = info->rq; @@ -1454,11 +1722,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) qp->wqe_alloc_db = info->wqe_alloc_db; qp->qp_id = info->qp_id; qp->sq_size = info->sq_size; - qp->push_mode = false; qp->max_sq_frag_cnt = info->max_sq_frag_cnt; - sq_ring_size = qp->sq_size << sqshift; + sq_ring_size = qp->sq_size << info->sq_shift; IRDMA_RING_INIT(qp->sq_ring, sq_ring_size); - IRDMA_RING_INIT(qp->initial_ring, sq_ring_size); if (info->first_sq_wq) { irdma_setup_connection_wqes(qp, info); qp->swqe_polarity = 1; @@ -1471,13 +1737,14 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) qp->rq_size = info->rq_size; qp->max_rq_frag_cnt = info->max_rq_frag_cnt; qp->max_inline_data = info->max_inline_data; - qp->rq_wqe_size = rqshift; + qp->rq_wqe_size = info->rq_shift; IRDMA_RING_INIT(qp->rq_ring, qp->rq_size); - qp->rq_wqe_size_multiplier = 1 << rqshift; + qp->rq_wqe_size_multiplier = 1 << info->rq_shift; if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) qp->wqe_ops = iw_wqe_uk_ops_gen_1; else qp->wqe_ops = iw_wqe_uk_ops; + qp->srq_uk = info->srq_uk; return ret_code; } @@ -1525,6 +1792,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq) if (polarity != temp) break; + /* Ensure CQE contents are read after valid bit is checked */ + dma_rmb(); + get_64bit_val(cqe, 8, &comp_ctx); if ((void *)(unsigned long)comp_ctx == q) set_64bit_val(cqe, 8, 0); @@ -1549,7 +1819,6 @@ int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) u32 wqe_idx; struct irdma_post_sq_info info = {}; - info.push_wqe = false; info.wr_id = wr_id; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, &info); |
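
Note (not part of the patch): the rewritten irdma_uk_qp_post_wr() above drops the push-mode and shadow-area bookkeeping and reduces doorbell posting to an ordering barrier plus one MMIO write. A minimal standalone sketch of that pattern follows; the demo_qp type is invented for illustration, and only the dma_wmb()/writel() pairing and the qp_id-into-doorbell usage come from the hunk.

#include <linux/io.h>
#include <linux/types.h>

struct demo_qp {
	u32 qp_id;			/* HW QP number */
	u32 __iomem *wqe_alloc_db;	/* mapped WQE-alloc doorbell register */
};

static void demo_post_wr(struct demo_qp *qp)
{
	/* order WQE stores in host memory before the doorbell MMIO write */
	dma_wmb();
	/* a single doorbell write carrying the QP id tells HW to fetch new SQ WQEs */
	writel(qp->qp_id, qp->wqe_alloc_db);
}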
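
Note (not part of the patch): irdma_uk_atomic_fetch_add() added above consumes info->op.atomic_fetch_add. Below is a hedged caller sketch, assuming only the field names visible in the hunk (tagged_offset, stag, remote_tagged_offset, remote_stag, fetch_add_data_bytes) and the wr_id/signaled fields of struct irdma_post_sq_info; a real caller would also set an op type for completion reporting, omitted here because that constant is defined outside this file.

static int demo_post_fetch_add(struct irdma_qp_uk *qp, u64 local_va, u32 local_stag,
			       u64 remote_va, u32 remote_rstag, u64 add_value, u64 wr_id)
{
	struct irdma_post_sq_info info = {};

	info.wr_id = wr_id;
	info.signaled = true;
	info.op.atomic_fetch_add.tagged_offset = local_va;	/* local buffer VA */
	info.op.atomic_fetch_add.stag = local_stag;		/* local STag */
	info.op.atomic_fetch_add.remote_tagged_offset = remote_va;
	info.op.atomic_fetch_add.remote_stag = remote_rstag;
	/* add operand; field name from the hunk, exact meaning assumed */
	info.op.atomic_fetch_add.fetch_add_data_bytes = add_value;

	/* returns -ENOMEM when no SQ quanta are available */
	return irdma_uk_atomic_fetch_add(qp, &info, true);
}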
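
Note (not part of the patch): irdma_uk_srq_post_receive() above takes a struct irdma_post_rq_info with wr_id, sg_list and num_sges (the fields dereferenced in the hunk). A minimal single-SGE usage sketch, assuming struct ib_sge from rdma/ib_verbs.h:

static int demo_srq_post_one(struct irdma_srq_uk *srq, u64 buf_va, u32 len,
			     u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr = buf_va,
		.length = len,
		.lkey = lkey,
	};
	struct irdma_post_rq_info info = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sges = 1,
	};

	/* -EINVAL if num_sges exceeds max_srq_frag_cnt, -ENOMEM if the ring is full */
	return irdma_uk_srq_post_receive(srq, &info);
}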
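
Note (not part of the patch): irdma_uk_qp_init() no longer derives WQE shifts itself; it reads info->sq_shift and info->rq_shift, and the new irdma_uk_calc_depth_shift_sq()/irdma_uk_calc_depth_shift_rq() helpers compute depth and shift for the caller. A hedged sketch of that calling sequence; writing the shifts back into the init info mirrors how qp init consumes them, while allocating sq/rq memory from the returned depths is left to the caller.

static int demo_calc_qp_sizes(struct irdma_qp_uk_init_info *ukinfo,
			      u32 *sq_depth, u32 *rq_depth)
{
	u8 sq_shift, rq_shift;
	int err;

	/* shift depends on max fragments (+1 for immediates on GEN_2+) and inline size */
	err = irdma_uk_calc_depth_shift_sq(ukinfo, sq_depth, &sq_shift);
	if (err)
		return err;

	err = irdma_uk_calc_depth_shift_rq(ukinfo, rq_depth, &rq_shift);
	if (err)
		return err;

	/* irdma_uk_qp_init() reads these shifts when sizing the rings */
	ukinfo->sq_shift = sq_shift;
	ukinfo->rq_shift = rq_shift;

	return 0;
}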
