author		Guoqing Jiang <guoqing.jiang@linux.dev>		2023-11-13 19:57:24 +0800
committer	Leon Romanovsky <leon@kernel.org>		2023-11-15 15:58:14 +0200
commit		788bbf4c2fc6e0c35bae9ed5068f484272539d3e
tree		955cba90000ac34cbcc1e4dfaeb2b4dd1f17cbc0 /drivers/infiniband
parent		3beced14d1998c8e0c5d3d78b2dd86255365e705
RDMA/siw: Only check attrs->cap.max_send_wr in siw_create_qp
Since both max_send_wr and max_recv_wr are of type u32, it is enough to
check max_send_wr here; we also need to ensure that num_sqe (derived
from max_send_wr) is not zero.
Acked-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Guoqing Jiang <guoqing.jiang@linux.dev>
Link: https://lore.kernel.org/r/20231113115726.12762-16-guoqing.jiang@linux.dev
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index aad0a7d8789f..dca6a155523d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -333,11 +333,10 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
 		goto err_atomic;
 	}
 	/*
-	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
-	 * but not for a QP unable to hold any WQE (SQ + RQ)
+	 * NOTE: we don't allow for a QP unable to hold any SQ WQE
 	 */
-	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
-		siw_dbg(base_dev, "QP must have send or receive queue\n");
+	if (attrs->cap.max_send_wr == 0) {
+		siw_dbg(base_dev, "QP must have send queue\n");
 		rv = -EINVAL;
 		goto err_atomic;
 	}
@@ -357,21 +356,14 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
 	if (rv)
 		goto err_atomic;
 
-	num_sqe = attrs->cap.max_send_wr;
-	num_rqe = attrs->cap.max_recv_wr;
 
 	/* All queue indices are derived from modulo operations
 	 * on a free running 'get' (consumer) and 'put' (producer)
 	 * unsigned counter. Having queue sizes at power of two
 	 * avoids handling counter wrap around.
 	 */
-	if (num_sqe)
-		num_sqe = roundup_pow_of_two(num_sqe);
-	else {
-		/* Zero sized SQ is not supported */
-		rv = -EINVAL;
-		goto err_out_xa;
-	}
+	num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
+	num_rqe = attrs->cap.max_recv_wr;
 	if (num_rqe)
 		num_rqe = roundup_pow_of_two(num_rqe);
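For context on the "free running counter / power of two" remark kept in the hunk above, here is a minimal, self-contained sketch in plain C. It is not siw code; the struct and function names (ring, ring_put, ring_get) are invented for this illustration. It shows why a power-of-two queue size lets unsigned 'get' (consumer) and 'put' (producer) counters run freely: indices are derived by masking, and counter wrap-around needs no special handling.

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

/*
 * Sketch only: a ring whose slot count is a power of two, indexed by
 * free-running unsigned producer ('put') and consumer ('get') counters.
 * Because size is a power of two, "counter % size" reduces to
 * "counter & (size - 1)", and "put - get" stays correct modulo 2^32
 * even when the counters wrap.
 */
struct ring {
	uint32_t put;	/* free-running producer counter */
	uint32_t get;	/* free-running consumer counter */
	uint32_t size;	/* number of slots, power of two */
	int slots[];	/* payload, 'size' entries */
};

static bool ring_full(const struct ring *r)
{
	return r->put - r->get == r->size;
}

static bool ring_empty(const struct ring *r)
{
	return r->put == r->get;
}

static bool ring_put(struct ring *r, int val)
{
	if (ring_full(r))
		return false;
	r->slots[r->put & (r->size - 1)] = val;	/* modulo via masking */
	r->put++;				/* may wrap; that is fine */
	return true;
}

static bool ring_get(struct ring *r, int *val)
{
	if (ring_empty(r))
		return false;
	*val = r->slots[r->get & (r->size - 1)];
	r->get++;
	return true;
}

int main(void)
{
	struct ring *r = malloc(sizeof(*r) + 4 * sizeof(int));
	int v;

	if (!r)
		return 1;
	/* Start both counters near UINT32_MAX to show wrap-around is harmless. */
	r->put = r->get = UINT32_MAX - 1;
	r->size = 4;

	for (int i = 0; i < 3; i++)
		ring_put(r, i);
	while (ring_get(r, &v))
		printf("%d\n", v);	/* prints 0 1 2 although both counters wrapped */

	free(r);
	return 0;
}

This is also why the patched siw_create_qp() can round the queue sizes up with roundup_pow_of_two(): for any non-zero max_send_wr the rounded num_sqe is non-zero, so the single max_send_wr check at the top is the only zero check needed.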