Diffstat (limited to 'drivers/infiniband/hw/mana')
 drivers/infiniband/hw/mana/cq.c      | 26 ++++++++++++++++++++++++++
 drivers/infiniband/hw/mana/device.c  |  3 +++
 drivers/infiniband/hw/mana/main.c    |  5 ++---
 drivers/infiniband/hw/mana/mana_ib.h | 14 ++++++++++++--
 drivers/infiniband/hw/mana/mr.c      |  6 ++++--
 drivers/infiniband/hw/mana/qp.c      |  9 +++++++++
 6 files changed, 56 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 28e154bbb50f..1becc8779123 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -291,6 +291,32 @@ out:
return wc_index;
}
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ struct mana_ib_cq *cq;
+ unsigned long flags;
+
+ if (!qp)
+ return;
+
+ cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
+ != NULL) {
+ shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+
+ mana_put_qp_ref(qp);
+}
+
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index fa60872f169f..bdeddb642b87 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -230,6 +230,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
+ if (mana_ib_is_rnic(dev))
+ mana_drain_gsi_sqs(dev);
+
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 6a2471f2e804..fac159f7128d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -273,9 +273,8 @@ int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
- return err;
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
+ return PTR_ERR(umem);
}
err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 5d31034ac7fb..9d36232ed880 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -43,6 +43,8 @@
*/
#define MANA_AV_BUFFER_SIZE 64
+#define MANA_GSI_QPN (1)
+
struct mana_ib_adapter_caps {
u32 max_sq_id;
u32 max_rq_id;
@@ -410,7 +412,7 @@ struct mana_ib_ah_attr {
u8 traffic_class;
u16 src_port;
u16 dest_port;
- u32 reserved;
+ u32 flow_label;
};
struct mana_rnic_set_qp_state_req {
@@ -427,8 +429,15 @@ struct mana_rnic_set_qp_state_req {
u32 retry_cnt;
u32 rnr_retry;
u32 min_rnr_timer;
- u32 reserved;
+ u32 rate_limit;
struct mana_ib_ah_attr ah_attr;
+ u64 reserved1;
+ u32 qkey;
+ u32 qp_access_flags;
+ u8 local_ack_timeout;
+ u8 max_rd_atomic;
+ u16 reserved2;
+ u32 reserved3;
}; /* HW Data */
struct mana_rnic_set_qp_state_resp {
@@ -718,6 +727,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 55701046ffba..3d0245a4c1ed 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -138,7 +138,8 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(ibdev,
- "Failed to get umem for register user-mr, %d\n", err);
+ "Failed to get umem for register user-mr, %pe\n",
+ mr->umem);
goto err_free;
}
@@ -220,7 +221,8 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
+ ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
+ umem_dmabuf);
goto err_free;
}
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index a6bf4d539e67..48c1f4977f21 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
+
+ req.hdr.req.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
@@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.retry_cnt = attr->retry_cnt;
req.rnr_retry = attr->rnr_retry;
req.min_rnr_timer = attr->min_rnr_timer;
+ req.rate_limit = attr->rate_limit;
+ req.qkey = attr->qkey;
+ req.local_ack_timeout = attr->timeout;
+ req.qp_access_flags = attr->qp_access_flags;
+ req.max_rd_atomic = attr->max_rd_atomic;
+
if (attr_mask & IB_QP_AV) {
ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
if (!ndev) {
@@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ibqp->qp_num, attr->dest_qp_num);
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
+ req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);