Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core')
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile             |   2
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c     |  20
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c  |  63
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c   |   6
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c          | 369
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c            |  41
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h            |  48
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c   | 240
 8 files changed, 681 insertions(+), 108 deletions(-)
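
In brief: the new esw/ipsec.c toggles the VF ipsec_offload, ipsec_crypto_offload and ipsec_full_offload HCA capabilities through the query/set_hca_cap commands; esw/devlink_port.c and eswitch_offloads.c expose those toggles as the devlink port-function attributes ipsec_crypto and ipsec_packet; and the eswitch block/unblock helpers are reworked so that host-side IPsec offload, VF IPsec offload and eswitch mode changes are mutually exclusive.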
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index b56b187a9097..7e94caca4888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3b88a8bb7082..7d4ceb9b9c16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -38,6 +38,7 @@
#include <net/netevent.h>
#include "en.h"
+#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"
@@ -670,6 +671,11 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_xfrm;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto err_xfrm;
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -678,7 +684,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto err_xfrm;
+ goto unblock_ipsec;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -735,6 +741,8 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_ipsec:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
kfree(sa_entry);
NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
@@ -764,6 +772,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto sa_entry_free;
@@ -780,6 +789,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+ mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
kfree(sa_entry);
}
@@ -1055,6 +1065,11 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
pol_entry->x = x;
pol_entry->ipsec = priv->ipsec;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto ipsec_busy;
+ }
+
mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
if (err)
@@ -1064,6 +1079,8 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
return 0;
err_fs:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
+ipsec_busy:
kfree(pol_entry);
NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
@@ -1074,6 +1091,7 @@ static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+ mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
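
The hunks above establish a trylock-style guard: mlx5_eswitch_block_ipsec() must succeed before a state or policy is offloaded, and every error path acquired after it, plus the eventual free path, must release it with mlx5_eswitch_unblock_ipsec(). A minimal compilable sketch of that unwind ordering, with hypothetical helper names standing in for the driver calls (not mlx5 code):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver helpers; not mlx5 code. */
static bool block_ipsec(void) { return true; }
static void unblock_ipsec(void) { }
static int create_work(void **w) { *w = malloc(1); return *w ? 0 : -ENOMEM; }
static void destroy_work(void *w) { free(w); }
static int create_hw_rules(void) { return 0; }

static int add_state(void)
{
        void *work;
        int err;

        if (!block_ipsec())
                return -EBUSY;          /* a VF already owns IPsec offload */

        err = create_work(&work);
        if (err)
                goto unblock;           /* nothing else to unwind yet */

        err = create_hw_rules();
        if (err)
                goto release_work;

        return 0;                       /* paired unblock runs at free_state() */

release_work:
        destroy_work(work);             /* unwind in reverse acquisition order */
unblock:
        unblock_ipsec();
        return err;
}

int main(void)
{
        return add_state();
}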
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index a1cfddd05bc4..7dba4221993f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -254,6 +254,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
if (rx == ipsec->rx_esw) {
mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
} else {
@@ -357,6 +359,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
goto err_add;
/* Create FT */
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
@@ -411,6 +415,8 @@ err_pol_ft:
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
@@ -428,26 +434,19 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (rx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = rx_create(mdev, ipsec, rx, family);
- mlx5_eswitch_block_mode_unlock(mdev, err);
- if (err)
- goto err_out;
+ if (err) {
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
+ }
skip:
rx->ft.refcnt++;
return 0;
-
-err_out:
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
@@ -456,12 +455,8 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
if (--rx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
rx_destroy(ipsec->mdev, ipsec, rx, family);
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
@@ -581,6 +576,8 @@ static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
mlx5_destroy_flow_group(tx->sa.group);
}
mlx5_destroy_flow_table(tx->ft.sa);
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(ipsec->mdev);
mlx5_del_flow_rules(tx->status.rule);
mlx5_destroy_flow_table(tx->ft.status);
}
@@ -621,6 +618,8 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
if (err)
goto err_status_rule;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
@@ -687,6 +686,8 @@ err_pol_ft:
err_sa_miss:
mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
mlx5_destroy_flow_table(tx->ft.status);
@@ -720,32 +721,22 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (tx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = tx_create(ipsec, tx, ipsec->roce);
if (err) {
- mlx5_eswitch_block_mode_unlock(mdev, err);
- goto err_out;
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
}
if (tx == ipsec->tx_esw)
ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
- mlx5_eswitch_block_mode_unlock(mdev, err);
-
skip:
tx->ft.refcnt++;
return 0;
-
-err_out:
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
@@ -753,19 +744,13 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
if (--tx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
-
if (tx == ipsec->tx_esw) {
mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
}
tx_destroy(ipsec, tx, ipsec->roce);
-
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
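
The net effect of this file's hunks is symmetry: mlx5_eswitch_block_encap() moves from rx_get()/tx_get() into rx_create()/tx_create(), so the encap block is taken and released next to the flow table it protects, while the mode block collapses from a four-call lock dance into a plain block/unblock pair around create/destroy. A toy refcounted get/put model of the resulting shape (names illustrative, not driver code):

#include <stdbool.h>

struct ft {
        unsigned int refcnt;
        bool created;
};

/* Hypothetical stand-ins for mlx5_eswitch_block_mode()/unblock_mode(). */
static int block_mode(void) { return 0; }
static void unblock_mode(void) { }
static int ft_create(struct ft *ft) { ft->created = true; return 0; }
static void ft_destroy(struct ft *ft) { ft->created = false; }

static int ft_get(struct ft *ft)
{
        int err;

        if (ft->refcnt)
                goto skip;              /* already created: just take a ref */

        err = block_mode();             /* first user pins the eswitch mode */
        if (err)
                return err;

        err = ft_create(ft);
        if (err) {
                unblock_mode();
                return err;
        }
skip:
        ft->refcnt++;
        return 0;
}

static void ft_put(struct ft *ft)
{
        if (--ft->refcnt)
                return;

        ft_destroy(ft);
        unblock_mode();                 /* last user releases the mode pin */
}

int main(void)
{
        struct ft ft = { 0 };
        int err = ft_get(&ft);

        if (err)
                return 1;
        ft_put(&ft);
        return 0;
}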
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 3c254a710006..d8e739cbcbce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -92,6 +92,12 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+#ifdef CONFIG_XFRM_OFFLOAD
+ .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
+ .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+ .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+ .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
+#endif /* CONFIG_XFRM_OFFLOAD */
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
new file mode 100644
index 000000000000..da10e04777cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
+ *result = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+enum esw_vport_ipsec_offload {
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
+};
+
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ bool ipsec_enabled;
+ int err;
+
+ /* Querying IPsec caps only makes sense when generic ipsec_offload
+ * HCA cap is enabled
+ */
+ err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
+ if (err)
+ return err;
+
+ if (!ipsec_enabled) {
+ vport->info.ipsec_crypto_enabled = false;
+ vport->info.ipsec_packet_enabled = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ vport->info.ipsec_crypto_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+ vport->info.ipsec_packet_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
+ ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int err;
+
+ if (vport->vport == MLX5_VPORT_PF)
+ return -EOPNOTSUPP;
+
+ if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
+ err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+
+ if (enable) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ } else {
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
+ if (err)
+ return err;
+
+ /* The generic ipsec_offload cap can be disabled only if both
+ * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+ */
+ if (!vport->info.ipsec_crypto_enabled &&
+ !vport->info.ipsec_packet_enabled) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+ }
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ vport->info.ipsec_crypto_enabled = enable;
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ vport->info.ipsec_packet_enabled = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
+ if (ret)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
+ ret = -EOPNOTSUPP;
+free:
+ kvfree(query_cap);
+ return ret;
+}
+
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
+{
+ /* Old firmware doesn't support ipsec_offload capability for VFs. This
+ * can be detected by checking reformat_add_esp_trasport capability -
+ * when this cap isn't supported it means firmware cannot be trusted
+ * about what it reports for ipsec_offload cap.
+ */
+ return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ err = esw_ipsec_offload_supported(dev, vport_num);
+ if (err)
+ return err;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
+ err = -EOPNOTSUPP;
+
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ ret = esw_ipsec_offload_supported(dev, vport_num);
+ if (ret)
+ return ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+ if (ret)
+ goto out;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap))
+ ret = -EOPNOTSUPP;
+
+out:
+ kvfree(query_cap);
+ return ret;
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
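
One detail worth noting in esw_ipsec_vf_offload_set_bytype() above is the ordering: on enable, the generic ipsec_offload HCA cap is set before the specific crypto/packet cap; on disable, the specific cap is cleared first, and the generic cap is cleared only once neither specific cap remains set. A toy model of that invariant (plain C, hypothetical names; the driver does this via MLX5_CMD_OP_SET_HCA_CAP against the VF's vport):

#include <stdbool.h>
#include <stdio.h>

/* Toy capability state standing in for the per-vport HCA caps. */
static bool generic_ipsec, crypto_cap, packet_cap;

static void set_offload(bool *specific, bool enable)
{
        if (enable) {
                generic_ipsec = true;           /* generic cap first... */
                *specific = true;               /* ...then the specific one */
        } else {
                *specific = false;              /* specific cap first... */
                if (!crypto_cap && !packet_cap)
                        generic_ipsec = false;  /* ...generic only when both are off */
        }
}

int main(void)
{
        set_offload(&crypto_cap, true);
        set_offload(&packet_cap, true);
        set_offload(&crypto_cap, false);
        /* packet offload still on, so the generic cap must survive */
        printf("generic=%d crypto=%d packet=%d\n",
               generic_ipsec, crypto_cap, packet_cap);  /* 1 0 1 */
        return 0;
}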
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index db1c2a076364..6cd7d6497e10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -48,6 +48,7 @@
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"
+#include "en_accel/ipsec.h"
enum {
MLX5_ACTION_NONE = 0,
@@ -831,6 +832,8 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
+
+ err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
out_free:
kfree(query_ctx);
return err;
@@ -913,6 +916,9 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count++;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
@@ -969,6 +975,10 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count--;
+
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -2336,3 +2346,34 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);
+
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->enabled_ipsec_vf_count) {
+ mutex_unlock(&esw->state_lock);
+ return false;
+ }
+
+ dev->num_ipsec_offloads++;
+ mutex_unlock(&esw->state_lock);
+ return true;
+}
+
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ /* Failure means no eswitch => core dev is not a PF */
+ return;
+
+ mutex_lock(&esw->state_lock);
+ dev->num_ipsec_offloads--;
+ mutex_unlock(&esw->state_lock);
+}
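
The two functions above, together with the enabled_ipsec_vf_count checks in the devlink callbacks at the end of this patch, implement a two-way exclusion under esw->state_lock: the host side cannot start an IPsec offload while any VF has IPsec enabled, and a VF's IPsec caps cannot be flipped while host offloads are in flight. A compilable userspace model of that invariant (pthread mutex standing in for state_lock; names hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int enabled_ipsec_vf_count;     /* VFs with IPsec caps on */
static unsigned int num_ipsec_offloads;         /* host-side states/policies */

static bool block_ipsec(void)
{
        bool ok;

        pthread_mutex_lock(&state_lock);
        ok = enabled_ipsec_vf_count == 0;
        if (ok)
                num_ipsec_offloads++;           /* host offload holds a reference */
        pthread_mutex_unlock(&state_lock);
        return ok;
}

static void unblock_ipsec(void)
{
        pthread_mutex_lock(&state_lock);
        num_ipsec_offloads--;
        pthread_mutex_unlock(&state_lock);
}

static int vf_ipsec_set(bool enable)
{
        int err = 0;

        pthread_mutex_lock(&state_lock);
        if (!enabled_ipsec_vf_count && num_ipsec_offloads)
                err = -EBUSY;                   /* host offloads in flight: refuse */
        else if (enable)
                enabled_ipsec_vf_count++;
        else
                enabled_ipsec_vf_count--;
        pthread_mutex_unlock(&state_lock);
        return err;
}

int main(void)
{
        printf("block, no VF IPsec:     %d\n", block_ipsec());          /* 1 */
        printf("VF set while blocked:   %d\n", vf_ipsec_set(true));     /* -16 */
        unblock_ipsec();
        printf("VF set after unblock:   %d\n", vf_ipsec_set(true));     /* 0 */
        printf("block with VF IPsec on: %d\n", block_ipsec());          /* 0 */
        return 0;
}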
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6fcece69d3be..37ab66e7b403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -163,6 +163,8 @@ struct mlx5_vport_info {
u8 trusted: 1;
u8 roce_enabled: 1;
u8 mig_enabled: 1;
+ u8 ipsec_crypto_enabled: 1;
+ u8 ipsec_packet_enabled: 1;
};
/* Vport context events */
@@ -380,6 +382,7 @@ struct mlx5_eswitch {
struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
+ u16 enabled_ipsec_vf_count;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -558,6 +561,16 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+#endif /* CONFIG_XFRM_OFFLOAD */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -829,10 +842,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err);
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev);
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
@@ -857,6 +868,22 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport);
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -916,13 +943,14 @@ static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; }
-
-static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {}
-
-static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {}
+static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
+static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ return true;
+}
-static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {}
+static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 67eab99f95b1..752fb0dfb111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3641,65 +3641,32 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
return net_eq(devl_net, netdev_net);
}
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev)
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- /* Failure means no eswitch => not possible to change eswitch mode */
- devl_unlock(devlink);
+ if (!mlx5_esw_allowed(esw))
return 0;
- }
+ /* Take TC into account */
err = mlx5_esw_try_lock(esw);
- if (err < 0) {
- devl_unlock(devlink);
+ if (err < 0)
return err;
- }
-
- return 0;
-}
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
- if (!err)
- esw->offloads.num_block_mode++;
+ esw->offloads.num_block_mode++;
mlx5_esw_unlock(esw);
- devl_unlock(devlink);
+ return 0;
}
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev)
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
-}
-
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
esw->offloads.num_block_mode--;
up_write(&esw->mode_lock);
}
@@ -3903,38 +3870,28 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- devl_unlock(devlink);
- /* Failure means no eswitch => not possible to change encap */
+ if (!mlx5_esw_allowed(esw))
return true;
- }
down_write(&esw->mode_lock);
if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return false;
}
esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
@@ -4410,3 +4367,172 @@ mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handl
return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_crypto_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec crypto");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_crypto_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
+ goto unlock;
+ }
+
+ vport->info.ipsec_crypto_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+ bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec packet mode");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_packet_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set IPsec packet mode");
+ goto unlock;
+ }
+
+ vport->info.ipsec_packet_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+#endif /* CONFIG_XFRM_OFFLOAD */
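
Once applied, the new knobs are exercised through the devlink port-function interface, e.g. `devlink port function set pci/0000:06:00.0/1 ipsec_crypto enable` or `devlink port function set pci/0000:06:00.0/1 ipsec_packet enable`, read back with `devlink port function show` (the PCI address and port index here are illustrative). Per the EBUSY checks above, the toggle is refused while the VF's vport is disabled or while any host-side IPsec state or policy is offloaded; conversely, mlx5_eswitch_block_ipsec() fails once any VF has either capability enabled.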