author		Yevgeny Kliteynik <kliteyn@nvidia.com>		2020-11-03 01:31:53 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>		2021-04-19 20:17:46 -0700
commit		aeacb52a8de7046be5399ba311f49bce96e1b269 (patch)
tree		3914c07151a087b4c539557bded653f0ddeb9725 /drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
parent		7304d603a57a1edecfecfbcc26f85edcda4cae81 (diff)
net/mlx5: DR, Add support for isolate_vl_tc QP
When using SW steering, the rule insertion rate depends on the performance of the RDMA RC QP that is used for writing to the ICM. Under stress, this QP competes for HW resources with all the other QPs that are used to send data.

To protect the SW steering QP's performance in such cases, we set this QP to use an isolated VL. The VL number is reserved by FW and is not exposed to the driver. Running this QP on an isolated VL is supported only when both the force-loopback and isolate_vl_tc capabilities are set.

Signed-off-by: Alex Vesker <valex@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
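To illustrate the gating described above, here is a minimal sketch; it is not the driver code, and the struct and helper names are illustrative assumptions (in the diff below, the real check is dr_send_allow_fl() applied to dmn->info.caps):

/* Illustrative sketch only: request the isolated VL exactly when both
 * capabilities named in the commit message are present.
 */
struct sketch_caps {
	u8 force_loopback:1;	/* force-loopback supported (assumed name) */
	u8 isolate_vl_tc:1;	/* FW reserves an isolated VL for this QP */
};

static bool sketch_use_isolated_vl(const struct sketch_caps *caps)
{
	/* Isolated VL is applicable only if force loopback is supported */
	return caps->force_loopback && caps->isolate_vl_tc;
}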
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 69d623bedefe..12cf323a5943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -46,6 +46,7 @@ struct dr_qp_init_attr {
u32 pdn;
u32 max_send_wr;
struct mlx5_uars_page *uar;
+ u8 isolate_vl_tc:1;
};

static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
@@ -158,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
MLX5_SET(qpc, qpc, pd, attr->pdn);
MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
MLX5_SET(qpc, qpc, log_page_size,
@@ -924,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
init_attr.pdn = dmn->pdn;
init_attr.uar = dmn->uar;
init_attr.max_send_wr = QUEUE_SIZE;
+
+ /* Isolated VL is applicable only if force loopback is supported */
+ if (dr_send_allow_fl(&dmn->info.caps))
+ init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
+
spin_lock_init(&dmn->send_ring->lock);
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
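The capability bit consumed above (dmn->info.caps.isolate_vl_tc) is filled in elsewhere in this series, not in this file. Assuming the usual mlx5 pattern of reading HCA capabilities with MLX5_CAP_GEN(), the query side would look roughly like the sketch below; the exact ifc field name is an assumption, since this commit only shows the consumer:

/* Sketch: populate the DR cap bit from the device's general HCA caps.
 * MLX5_CAP_GEN() is the standard mlx5 caps accessor; the field name
 * 'isolate_vl_tc_new' is assumed here, not confirmed by this patch.
 */
static void sketch_fill_isolate_vl_tc(struct mlx5_core_dev *mdev,
				      struct mlx5dr_cmd_caps *caps)
{
	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
}

From there the flag flows through init_attr into the QP context via the MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc) line in the first hunk.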