author     Mark Zhang <markzhang@nvidia.com>        2023-01-04 11:43:34 +0200
committer  Leon Romanovsky <leon@kernel.org>        2023-01-15 12:23:10 +0200
commit     312b8f79eb05479628ee71357749815b2eeeeea8 (patch)
tree       5aa94795a7badd6a549a0bfb0cd07c4f68865179 /drivers/infiniband/hw/mlx5/qp.c
parent     1ca49d26afc2609542be4a57e336191b525ca698 (diff)
RDMA/mlx: Calling qp event handler in workqueue context
Move the call of the QP event handler from atomic to workqueue context,
so that the handler is able to block. This is needed by the following
patches.

Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Link: https://lore.kernel.org/r/0cd17b8331e445f03942f4bb28d447f24ac5669d.1672821186.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
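[Editor's note] The patch applies the usual atomic-to-workqueue deferral idiom: the event callback, which may run in atomic (IRQ/notifier) context, captures the event into a small GFP_ATOMIC-allocated work item, and a workqueue later invokes the potentially blocking handler from process context. A minimal self-contained sketch of that idiom follows; all demo_* names are illustrative, not part of mlx5_ib.

/*
 * Sketch of the atomic-to-workqueue deferral idiom used by this patch.
 * The demo_* names are illustrative assumptions, not driver code.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_event_work {
	struct work_struct work;
	int type;			/* payload captured in atomic context */
};

static struct workqueue_struct *demo_wq;

static void demo_handle_event(struct work_struct *_work)
{
	struct demo_event_work *w =
		container_of(_work, struct demo_event_work, work);

	/* Process context: the handler may now sleep/block. */
	pr_info("handling event type %d\n", w->type);
	kfree(w);
}

/* May be called from atomic context, so it must not sleep. */
static void demo_event(int type)
{
	struct demo_event_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* non-sleeping allocation */
	if (!w)
		return;				/* event dropped on OOM */

	w->type = type;
	INIT_WORK(&w->work, demo_handle_event);
	queue_work(demo_wq, &w->work);
}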
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 119
1 file changed, 84 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cf953d23d18d..9b85e1be0aed 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -71,6 +71,14 @@ struct mlx5_modify_raw_qp_param {
 	u32 port;
 };
 
+struct mlx5_ib_qp_event_work {
+	struct work_struct work;
+	struct mlx5_core_qp *qp;
+	int type;
+};
+
+static struct workqueue_struct *mlx5_ib_qp_event_wq;
+
 static void get_cqs(enum ib_qp_type qp_type,
 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
@@ -302,51 +310,78 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
 	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
 }
 
+static void mlx5_ib_handle_qp_event(struct work_struct *_work)
+{
+	struct mlx5_ib_qp_event_work *qpe_work =
+		container_of(_work, struct mlx5_ib_qp_event_work, work);
+	struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+	struct ib_event event = {};
+
+	event.device = ibqp->device;
+	event.element.qp = ibqp;
+	switch (qpe_work->type) {
+	case MLX5_EVENT_TYPE_PATH_MIG:
+		event.event = IB_EVENT_PATH_MIG;
+		break;
+	case MLX5_EVENT_TYPE_COMM_EST:
+		event.event = IB_EVENT_COMM_EST;
+		break;
+	case MLX5_EVENT_TYPE_SQ_DRAINED:
+		event.event = IB_EVENT_SQ_DRAINED;
+		break;
+	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		break;
+	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+		event.event = IB_EVENT_QP_FATAL;
+		break;
+	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+		event.event = IB_EVENT_PATH_MIG_ERR;
+		break;
+	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+		event.event = IB_EVENT_QP_REQ_ERR;
+		break;
+	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+		event.event = IB_EVENT_QP_ACCESS_ERR;
+		break;
+	default:
+		pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n",
+			qpe_work->type, qpe_work->qp->qpn);
+		goto out;
+	}
+
+	ibqp->event_handler(&event, ibqp->qp_context);
+
+out:
+	mlx5_core_res_put(&qpe_work->qp->common);
+	kfree(qpe_work);
+}
+
 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 {
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
-	struct ib_event event;
+	struct mlx5_ib_qp_event_work *qpe_work;
 
 	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
 		/* This event is only valid for trans_qps */
 		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
 	}
 
-	if (ibqp->event_handler) {
-		event.device = ibqp->device;
-		event.element.qp = ibqp;
-		switch (type) {
-		case MLX5_EVENT_TYPE_PATH_MIG:
-			event.event = IB_EVENT_PATH_MIG;
-			break;
-		case MLX5_EVENT_TYPE_COMM_EST:
-			event.event = IB_EVENT_COMM_EST;
-			break;
-		case MLX5_EVENT_TYPE_SQ_DRAINED:
-			event.event = IB_EVENT_SQ_DRAINED;
-			break;
-		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
-			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
-			break;
-		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
-			event.event = IB_EVENT_QP_FATAL;
-			break;
-		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
-			event.event = IB_EVENT_PATH_MIG_ERR;
-			break;
-		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-			event.event = IB_EVENT_QP_REQ_ERR;
-			break;
-		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
-			event.event = IB_EVENT_QP_ACCESS_ERR;
-			break;
-		default:
-			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
-			return;
-		}
+	if (!ibqp->event_handler)
+		goto out_no_handler;
 
-		ibqp->event_handler(&event, ibqp->qp_context);
-	}
+	qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+	if (!qpe_work)
+		goto out_no_handler;
+
+	qpe_work->qp = qp;
+	qpe_work->type = type;
+	INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event);
+	queue_work(mlx5_ib_qp_event_wq, &qpe_work->work);
+	return;
+
+out_no_handler:
+	mlx5_core_res_put(&qp->common);
 }
 
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
@@ -5720,3 +5755,17 @@ out:
 	mutex_unlock(&mqp->mutex);
 	return err;
 }
+
+int mlx5_ib_qp_event_init(void)
+{
+	mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0);
+	if (!mlx5_ib_qp_event_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_ib_qp_event_cleanup(void)
+{
+	destroy_workqueue(mlx5_ib_qp_event_wq);
+}