Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
-rw-r--r--    drivers/infiniband/hw/mlx4/qp.c    193
1 file changed, 125 insertions(+), 68 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4a2ef7daaded..f2887ae6390e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -102,6 +102,14 @@ enum mlx4_ib_source_type {
 	MLX4_IB_RWQ_SRC	= 1,
 };
 
+struct mlx4_ib_qp_event_work {
+	struct work_struct work;
+	struct mlx4_qp *qp;
+	enum mlx4_event type;
+};
+
+static struct workqueue_struct *mlx4_ib_qp_event_wq;
+
 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
 	if (!mlx4_is_master(dev->dev))
@@ -200,50 +208,77 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
 	}
 }
 
+static void mlx4_ib_handle_qp_event(struct work_struct *_work)
+{
+	struct mlx4_ib_qp_event_work *qpe_work =
+		container_of(_work, struct mlx4_ib_qp_event_work, work);
+	struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+	struct ib_event event = {};
+
+	event.device = ibqp->device;
+	event.element.qp = ibqp;
+
+	switch (qpe_work->type) {
+	case MLX4_EVENT_TYPE_PATH_MIG:
+		event.event = IB_EVENT_PATH_MIG;
+		break;
+	case MLX4_EVENT_TYPE_COMM_EST:
+		event.event = IB_EVENT_COMM_EST;
+		break;
+	case MLX4_EVENT_TYPE_SQ_DRAINED:
+		event.event = IB_EVENT_SQ_DRAINED;
+		break;
+	case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		break;
+	case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+		event.event = IB_EVENT_QP_FATAL;
+		break;
+	case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+		event.event = IB_EVENT_PATH_MIG_ERR;
+		break;
+	case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+		event.event = IB_EVENT_QP_REQ_ERR;
+		break;
+	case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+		event.event = IB_EVENT_QP_ACCESS_ERR;
+		break;
+	default:
+		pr_warn("Unexpected event type %d on QP %06x\n",
+			qpe_work->type, qpe_work->qp->qpn);
+		goto out;
+	}
+
+	ibqp->event_handler(&event, ibqp->qp_context);
+
+out:
+	mlx4_put_qp(qpe_work->qp);
+	kfree(qpe_work);
+}
+
 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 {
-	struct ib_event event;
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+	struct mlx4_ib_qp_event_work *qpe_work;
 
 	if (type == MLX4_EVENT_TYPE_PATH_MIG)
 		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
 
-	if (ibqp->event_handler) {
-		event.device     = ibqp->device;
-		event.element.qp = ibqp;
-		switch (type) {
-		case MLX4_EVENT_TYPE_PATH_MIG:
-			event.event = IB_EVENT_PATH_MIG;
-			break;
-		case MLX4_EVENT_TYPE_COMM_EST:
-			event.event = IB_EVENT_COMM_EST;
-			break;
-		case MLX4_EVENT_TYPE_SQ_DRAINED:
-			event.event = IB_EVENT_SQ_DRAINED;
-			break;
-		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
-			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
-			break;
-		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
-			event.event = IB_EVENT_QP_FATAL;
-			break;
-		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
-			event.event = IB_EVENT_PATH_MIG_ERR;
-			break;
-		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-			event.event = IB_EVENT_QP_REQ_ERR;
-			break;
-		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-			event.event = IB_EVENT_QP_ACCESS_ERR;
-			break;
-		default:
-			pr_warn("Unexpected event type %d "
-				"on QP %06x\n", type, qp->qpn);
-			return;
-		}
+	if (!ibqp->event_handler)
+		goto out_no_handler;
 
-		ibqp->event_handler(&event, ibqp->qp_context);
-	}
+	qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+	if (!qpe_work)
+		goto out_no_handler;
+
+	qpe_work->qp = qp;
+	qpe_work->type = type;
+	INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
+	queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
+	return;
+
+out_no_handler:
+	mlx4_put_qp(qp);
 }
 
 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
@@ -412,9 +447,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
 			    struct mlx4_ib_qp *qp,
 			    struct mlx4_ib_create_qp *ucmd)
 {
+	u32 cnt;
+
 	/* Sanity check SQ size before proceeding */
-	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
-	    ucmd->log_sq_stride >
+	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+	    cnt > dev->dev->caps.max_wqes)
+		return -EINVAL;
+	if (ucmd->log_sq_stride >
 	    ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
 		return -EINVAL;
@@ -526,15 +565,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 		return (-EOPNOTSUPP);
 	}
 
-	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
-					  MLX4_IB_RX_HASH_DST_IPV4 |
-					  MLX4_IB_RX_HASH_SRC_IPV6 |
-					  MLX4_IB_RX_HASH_DST_IPV6 |
-					  MLX4_IB_RX_HASH_SRC_PORT_TCP |
-					  MLX4_IB_RX_HASH_DST_PORT_TCP |
-					  MLX4_IB_RX_HASH_SRC_PORT_UDP |
-					  MLX4_IB_RX_HASH_DST_PORT_UDP |
-					  MLX4_IB_RX_HASH_INNER)) {
+	if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
+					       MLX4_IB_RX_HASH_DST_IPV4 |
+					       MLX4_IB_RX_HASH_SRC_IPV6 |
+					       MLX4_IB_RX_HASH_DST_IPV6 |
+					       MLX4_IB_RX_HASH_SRC_PORT_TCP |
+					       MLX4_IB_RX_HASH_DST_PORT_TCP |
+					       MLX4_IB_RX_HASH_SRC_PORT_UDP |
+					       MLX4_IB_RX_HASH_DST_PORT_UDP |
+					       MLX4_IB_RX_HASH_INNER)) {
 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
 			 ucmd->rx_hash_fields_mask);
 		return (-EOPNOTSUPP);
@@ -886,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 	}
 
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+	if (shift < 0) {
+		err = shift;
+		goto err_buf;
+	}
 
+	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 	if (err)
 		goto err_buf;
 
@@ -1069,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 	}
 
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+	if (shift < 0) {
+		err = shift;
+		goto err_buf;
+	}
 
+	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 	if (err)
 		goto err_buf;
 
@@ -1099,8 +1146,10 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		if (dev->steering_support ==
 		    MLX4_STEERING_MODE_DEVICE_MANAGED)
 			qp->flags |= MLX4_IB_QP_NETIF;
-		else
+		else {
+			err = -EINVAL;
 			goto err;
+		}
 	}
 
 	err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
@@ -1578,24 +1627,19 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
 	return 0;
 }
 
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata)
-{
-	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
+int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+		      struct ib_udata *udata)
+{
+	struct ib_device *device = ibqp->device;
 	struct mlx4_ib_dev *dev = to_mdev(device);
-	struct mlx4_ib_qp *qp;
+	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	struct ib_pd *pd = ibqp->pd;
 	int ret;
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp)
-		return ERR_PTR(-ENOMEM);
-
 	mutex_init(&qp->mutex);
 	ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
-	if (ret) {
-		kfree(qp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		return ret;
 
 	if (init_attr->qp_type == IB_QPT_GSI &&
 	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
@@ -1608,7 +1652,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 		sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
 
 		if (IS_ERR(sqp->roce_v2_gsi)) {
-			pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
+			pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n",
+			       sqp->roce_v2_gsi);
 			sqp->roce_v2_gsi = NULL;
 		} else {
 			to_mqp(sqp->roce_v2_gsi)->flags |=
@@ -1618,7 +1663,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
 		}
 	}
-	return &qp->ibqp;
+	return 0;
 }
 
 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -1646,8 +1691,6 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 	}
 
 	kfree(mqp->sqp);
-	kfree(mqp);
-
 	return 0;
 }
 
@@ -1860,7 +1903,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
 			 u16 vlan_id, u8 *smac)
 {
 	return _mlx4_set_path(dev, &qp->ah_attr,
-			      mlx4_mac_to_u64(smac),
+			      ether_addr_to_u64(smac),
 			      vlan_id,
 			      path, &mqp->pri, port);
 }
@@ -4473,3 +4516,17 @@ void mlx4_ib_drain_rq(struct ib_qp *qp)
 
 	handle_drain_completion(cq, &rdrain, dev);
 }
+
+int mlx4_ib_qp_event_init(void)
+{
+	mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0);
+	if (!mlx4_ib_qp_event_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_ib_qp_event_cleanup(void)
+{
+	destroy_workqueue(mlx4_ib_qp_event_wq);
+}
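Note on the main change above: QP async events were previously dispatched to the consumer's event handler directly from the firmware event path, which runs in atomic context. The diff defers dispatch to an ordered workqueue (mlx4_ib_qp_event_wq), so the handler runs in process context, and it drops the QP reference via mlx4_put_qp() only after the work item has run (or immediately when no work is queued). Below is a minimal standalone sketch of the same defer-to-workqueue pattern, not the driver's code: the names my_event_work, my_event_wq, my_post_event are hypothetical, a kernel-module build environment is assumed, and the QP refcounting is omitted for brevity.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical deferred-event carrier, mirroring mlx4_ib_qp_event_work. */
struct my_event_work {
	struct work_struct work;
	int type;		/* event code captured in atomic context */
};

static struct workqueue_struct *my_event_wq;

/* Runs in process context; free to sleep, take mutexes, call handlers. */
static void my_handle_event(struct work_struct *_work)
{
	struct my_event_work *w = container_of(_work, struct my_event_work, work);

	pr_info("handling event type %d\n", w->type);
	kfree(w);
}

/* May be called from atomic (IRQ/EQ) context, hence GFP_ATOMIC. */
static void my_post_event(int type)
{
	struct my_event_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;	/* event dropped, as in the driver's OOM path */

	w->type = type;
	INIT_WORK(&w->work, my_handle_event);
	queue_work(my_event_wq, &w->work);
}

static int __init my_init(void)
{
	/* An ordered workqueue executes one item at a time, in order,
	 * preserving the sequence in which events were posted. */
	my_event_wq = alloc_ordered_workqueue("my_event_wq", 0);
	return my_event_wq ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_event_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");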
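The set_user_sq_size() hunk replaces the unchecked expression 1 << ucmd->log_sq_bb_count with check_shl_overflow() from <linux/overflow.h>: log_sq_bb_count comes from userspace, and a shift count of 32 or more is undefined behavior that could wrap past the max_wqes bound. A small sketch of the validation pattern, with hypothetical names (validate_log_count; max_wqes stands in for dev->dev->caps.max_wqes):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* Reject a user-supplied log2 size whose expansion would not fit in a
 * u32, instead of shifting blindly. check_shl_overflow() returns true
 * when (1 << log_count) overflows the destination type.
 */
static int validate_log_count(u8 log_count, u32 max_wqes)
{
	u32 cnt;

	if (check_shl_overflow(1, log_count, &cnt) || cnt > max_wqes)
		return -EINVAL;

	return 0;
}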
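Finally, the pr_err() change swaps the "%ld" / PTR_ERR() pair for the %pe printk specifier, which takes the ERR_PTR() itself and prints the symbolic errno name (e.g. "-ENOMEM") rather than a raw number. A tiny illustration with a hypothetical helper:

#include <linux/err.h>
#include <linux/printk.h>

/* Accepts an ERR_PTR() directly; %pe renders the symbolic errno. */
static void report_err_ptr(const void *ptr)
{
	if (IS_ERR(ptr))
		pr_err("operation failed (%pe)\n", ptr);
}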
