Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	144
1 file changed, 56 insertions(+), 88 deletions(-)
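
In brief, the hunks below switch bnxt_re to the const-qualified ib_send_wr/ib_recv_wr posting interface, report max_send_sge and max_recv_sge separately, signal IP-based GIDs via port_attr->ip_gids, and read the source GID attribute cached on the AH/GRH (ah->sgid_attr, grh->sgid_attr) instead of re-querying it with ib_get_cached_gid(). As a minimal sketch of that last pattern, assuming a kernel tree that already provides rdma_gid_attr_network_type() and the sgid_attr pointer on struct ib_ah (both visible in the hunks), and with example_nw_type() being an invented name used only for illustration:

	/* Sketch only, not part of this patch: resolve the RoCE network type
	 * from the GID attribute cached on the AH, instead of calling
	 * ib_get_cached_gid() and dropping the ndev reference afterwards.
	 */
	#include <rdma/ib_verbs.h>

	static enum rdma_network_type example_nw_type(const struct ib_ah *ah)
	{
		const struct ib_gid_attr *sgid_attr = ah->sgid_attr;

		/* replaces ib_gid_to_network_type(sgid_attr.gid_type, &sgid) */
		return rdma_gid_attr_network_type(sgid_attr);
	}
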
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a76e206704d4..bbfb86eb2d24 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -166,7 +166,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_MEM_MGT_EXTENSIONS;
- ib_attr->max_sge = dev_attr->max_qp_sges;
+ ib_attr->max_send_sge = dev_attr->max_qp_sges;
+ ib_attr->max_recv_sge = dev_attr->max_qp_sges;
ib_attr->max_sge_rd = dev_attr->max_qp_sges;
ib_attr->max_cq = dev_attr->max_cq;
ib_attr->max_cqe = dev_attr->max_cq_wqes;
@@ -243,8 +244,8 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->gid_tbl_len = dev_attr->max_sgid;
port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP |
- IB_PORT_IP_BASED_GIDS;
+ IB_PORT_VENDOR_CLASS_SUP;
+ port_attr->ip_gids = true;
port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
@@ -364,8 +365,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
return rc;
}
-int bnxt_re_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context)
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
int rc;
u32 tbl_idx = 0;
@@ -377,7 +377,7 @@ int bnxt_re_add_gid(const union ib_gid *gid,
if ((attr->ndev) && is_vlan_dev(attr->ndev))
vlan_id = vlan_dev_vlan_id(attr->ndev);
- rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
+ rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
vlan_id, true, &tbl_idx);
if (rc == -EALREADY) {
@@ -673,8 +673,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
int rc;
u8 nw_type;
- struct ib_gid_attr sgid_attr;
-
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
return ERR_PTR(-EINVAL);
@@ -705,20 +703,11 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
grh->dgid.raw) &&
!rdma_link_local_addr((struct in6_addr *)
grh->dgid.raw)) {
- union ib_gid sgid;
+ const struct ib_gid_attr *sgid_attr;
- rc = ib_get_cached_gid(&rdev->ibdev, 1,
- grh->sgid_index, &sgid,
- &sgid_attr);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to query gid at index %d",
- grh->sgid_index);
- goto fail;
- }
- dev_put(sgid_attr.ndev);
+ sgid_attr = grh->sgid_attr;
/* Get network header type for this GID */
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
@@ -1408,7 +1397,7 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
}
if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto exit;
}
@@ -1530,8 +1519,8 @@ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
return 0;
}
-int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
@@ -1599,9 +1588,6 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
- int status;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
unsigned int flags;
u8 nw_type;
@@ -1668,6 +1654,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_AV) {
const struct ib_global_route *grh =
rdma_ah_read_grh(&qp_attr->ah_attr);
+ const struct ib_gid_attr *sgid_attr;
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
@@ -1691,29 +1678,23 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
ether_addr_copy(qp->qplib_qp.ah.dmac,
qp_attr->ah_attr.roce.dmac);
- status = ib_get_cached_gid(&rdev->ibdev, 1,
- grh->sgid_index,
- &sgid, &sgid_attr);
- if (!status) {
- memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
- ETH_ALEN);
- dev_put(sgid_attr.ndev);
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
- &sgid);
- switch (nw_type) {
- case RDMA_NETWORK_IPV4:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
- break;
- case RDMA_NETWORK_IPV6:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
- break;
- default:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
- break;
- }
+ sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
+ memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
+ ETH_ALEN);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV4:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
+ break;
+ default:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
+ break;
}
}
@@ -1895,19 +1876,17 @@ out:
/* Routine for sending QP1 packets for RoCE V1 an V2
*/
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
- struct ib_device *ibdev = &qp->rdev->ibdev;
struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
ib_ah);
struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
+ const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
struct bnxt_qplib_sge sge;
- union ib_gid sgid;
u8 nw_type;
u16 ether_type;
- struct ib_gid_attr sgid_attr;
union ib_gid dgid;
bool is_eth = false;
bool is_vlan = false;
@@ -1920,22 +1899,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
- rc = ib_get_cached_gid(ibdev, 1,
- qplib_ah->host_sgid_index, &sgid,
- &sgid_attr);
- if (rc) {
- dev_err(rdev_to_dev(qp->rdev),
- "Failed to query gid at index %d",
- qplib_ah->host_sgid_index);
- return rc;
- }
- if (sgid_attr.ndev) {
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
- dev_put(sgid_attr.ndev);
- }
+ if (is_vlan_dev(sgid_attr->ndev))
+ vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
/* Get network header type for this GID */
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
@@ -1948,9 +1915,9 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
break;
}
memcpy(&dgid.raw, &qplib_ah->dgid, 16);
- is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+ is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
if (is_udp) {
- if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
ip_version = 4;
ether_type = ETH_P_IP;
} else {
@@ -1983,9 +1950,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
}
if (is_grh || (ip_version == 6)) {
- memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
+ memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
+ sizeof(sgid_attr->gid));
memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
- sizeof(sgid));
+ sizeof(sgid_attr->gid));
qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
}
@@ -1995,7 +1963,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
- memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
+ memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
}
@@ -2080,7 +2048,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
* and the MAD datagram out to the provided SGE.
*/
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
- struct ib_recv_wr *wr,
+ const struct ib_recv_wr *wr,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
@@ -2125,7 +2093,7 @@ static int is_ud_qp(struct bnxt_re_qp *qp)
}
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_re_ah *ah = NULL;
@@ -2163,7 +2131,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
return 0;
}
-static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
switch (wr->opcode) {
@@ -2195,7 +2163,7 @@ static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
switch (wr->opcode) {
@@ -2222,7 +2190,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
@@ -2241,7 +2209,7 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
+static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
@@ -2283,7 +2251,7 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
}
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
/* Copy the inline data to the data field */
@@ -2313,7 +2281,7 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
}
static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
int payload_sz = 0;
@@ -2345,7 +2313,7 @@ static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
@@ -2393,8 +2361,8 @@ bad:
return rc;
}
-int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
@@ -2441,7 +2409,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
default:
break;
}
- /* Fall thru to build the wqe */
+ /* fall through */
case IB_WR_SEND_WITH_INV:
rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
break;
@@ -2493,7 +2461,7 @@ bad:
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
struct bnxt_qplib_swqe wqe;
int rc = 0;
@@ -2526,8 +2494,8 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
return rc;
}
-int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;