Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_accel')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h      |   15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c        |  111
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h        |   14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c      |   24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h    |    9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c          |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h          |   12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c       |   52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c       |   41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c     |   27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h    |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c        | 1870
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h        |   71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c     | 1384
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h     |   47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c  |   72
16 files changed, 3676 insertions, 107 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1839f1ab1ddd..07187028f0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -39,6 +39,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
+#include "en_accel/macsec.h"
#include "en.h"
#include "en/txrx.h"
@@ -137,6 +138,15 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
+ return false;
+ }
+#endif
+
return true;
}
@@ -163,6 +173,11 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
+ mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
+#endif
+
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
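
The two MACsec hooks above are gated on mlx5e_macsec_skb_is_offload(). A
minimal sketch of such a predicate, assuming MACsec offload tags its skbs
with a METADATA_MACSEC metadata_dst (the actual helper is declared in
en_accel/macsec.h, added later in this diff):

    /* Sketch only: the METADATA_MACSEC tagging is an assumption. */
    static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
    {
            struct metadata_dst *md_dst = skb_metadata_dst(skb);

            return md_dst && md_dst->type == METADATA_MACSEC;
    }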
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 20a4f1e585af..285d32d2fd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
+#include "mlx5_core.h"
#include "en_accel/fs_tcp.h"
#include "fs_core.h"
@@ -71,13 +71,13 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
struct mlx5e_flow_table *ft = NULL;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
@@ -86,19 +86,17 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs->accel_tcp;
-
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (sk->sk_family) {
case AF_INET:
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
- mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
- &inet_sk(sk)->inet_rcv_saddr,
- inet_sk(sk)->inet_sport,
- &inet_sk(sk)->inet_daddr,
- inet_sk(sk)->inet_dport);
+ fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
+ &inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ &inet_sk(sk)->inet_daddr,
+ inet_sk(sk)->inet_dport);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
@@ -140,34 +138,32 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
- PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
out:
kvfree(spec);
return flow;
}
-static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
+static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
enum accel_fs_tcp_type type)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_flow_table *accel_fs_t;
struct mlx5_flow_destination dest;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, accel_fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -265,9 +261,11 @@ out:
return err;
}
-static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
+static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,21 +275,21 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
- ft->t->id, ft->t->level);
+ fs_dbg(fs, "Created fs accel table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = accel_fs_tcp_create_groups(ft, type);
if (err)
goto err;
- err = accel_fs_tcp_add_default_rule(priv, type);
+ err = accel_fs_tcp_add_default_rule(fs, type);
if (err)
goto err;
@@ -301,17 +299,18 @@ err:
return err;
}
-static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
@@ -319,32 +318,32 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs->accel_tcp->tables[i].t;
+ dest.ft = accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
return 0;
}
-static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
+static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
{
- struct mlx5e_accel_fs_tcp *fs_tcp;
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
- fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -353,40 +352,43 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
fs_tcp->tables[i].t = NULL;
}
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
int i;
- if (!priv->fs->accel_tcp)
+ if (!accel_tcp)
return;
- accel_fs_tcp_disable(priv);
+ accel_fs_tcp_disable(fs);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
- accel_fs_tcp_destroy_table(priv, i);
+ accel_fs_tcp_destroy_table(fs, i);
- kfree(priv->fs->accel_tcp);
- priv->fs->accel_tcp = NULL;
+ kvfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
}
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp;
int i, err;
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
- if (!priv->fs->accel_tcp)
+ accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
+ if (!accel_tcp)
return -ENOMEM;
+ mlx5e_fs_set_accel_tcp(fs, accel_tcp);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- err = accel_fs_tcp_create_table(priv, i);
+ err = accel_fs_tcp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = accel_fs_tcp_enable(priv);
+ err = accel_fs_tcp_enable(fs);
if (err)
goto err_destroy_tables;
@@ -394,9 +396,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_tables:
while (--i >= 0)
- accel_fs_tcp_destroy_table(priv, i);
-
- kfree(priv->fs->accel_tcp);
- priv->fs->accel_tcp = NULL;
+ accel_fs_tcp_destroy_table(fs, i);
+ kvfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
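
The conversion above removes every direct priv->fs dereference, so fs_tcp.c
now depends only on struct mlx5e_flow_steering. A sketch of the accessor pair
it relies on, assuming the flow-steering object keeps an accel_tcp pointer
(the real definitions belong to en/fs.c):

    /* Sketch of the assumed getter/setter pair; the field name is illustrative. */
    struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
    {
            return fs->accel_tcp;
    }

    void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs,
                                struct mlx5e_accel_fs_tcp *accel_tcp)
    {
            fs->accel_tcp = accel_tcp;
    }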
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index 589235824543..a032bff482a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -4,19 +4,19 @@
#ifndef __MLX5E_ACCEL_FS_TCP_H__
#define __MLX5E_ACCEL_FS_TCP_H__
-#include "en.h"
+#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
-static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
-static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
+static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
+static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f8113fd23265..b859e4a4c744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -174,6 +174,8 @@ static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -182,15 +184,14 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
-
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +206,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -230,6 +231,7 @@ err_add:
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
@@ -249,7 +251,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -260,6 +262,7 @@ out:
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -271,7 +274,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
@@ -385,7 +388,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
0xff, 16);
}
- flow_act->ipsec_obj_id = ipsec_obj_id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act->crypto.obj_id = ipsec_obj_id;
flow_act->flags |= FLOW_ACT_NO_APPEND;
}
@@ -441,7 +445,7 @@ static int rx_add_rule(struct mlx5e_priv *priv,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
flow_act.modify_hdr = modify_hdr;
@@ -497,7 +501,7 @@ static int tx_add_rule(struct mlx5e_priv *priv,
MLX5_ETH_WQE_FT_META_IPSEC);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -573,7 +577,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
int err = -ENOMEM;
ns = mlx5_get_flow_namespace(ipsec->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
if (!ns)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 0ae4e12ce528..1878a70b9031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,9 @@
#include "en.h"
#include "en/txrx.h"
-/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
-#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
struct mlx5e_accel_tx_ipsec_state {
@@ -77,11 +77,6 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
-static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
-{
- return ipsec_st->x;
-}
-
static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
{
return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
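
The updated comment and masks pack three fields into one 32-bit metadata word.
A worked decode with an illustrative value:

    u32 metadata = 0x81000123;

    MLX5_IPSEC_METADATA_MARKER(metadata);  /* bit 31      -> 1 (IPsec flow) */
    MLX5_IPSEC_METADATA_SYNDROM(metadata); /* bits 29..24 -> 0x01 */
    MLX5_IPSEC_METADATA_HANDLE(metadata);  /* bits 23..0  -> 0x000123 */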
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 30a70d139046..da2184c94203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -92,6 +92,24 @@ static const struct tlsdev_ops mlx5e_ktls_ops = {
.tls_dev_resync = mlx5e_ktls_resync,
};
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ /* Check the possibility to post the required ICOSQ WQEs. */
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
+ return false;
+
+ return true;
+}
+
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
@@ -118,9 +136,9 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
if (enable)
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
else
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
mutex_unlock(&priv->state_lock);
return err;
@@ -138,7 +156,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
if (err) {
destroy_workqueue(priv->tls->rx_wq);
return err;
@@ -154,7 +172,7 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
destroy_workqueue(priv->tls->rx_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 948400dee525..1c35045e41fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -25,7 +25,8 @@ static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ return (MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128) ||
+ MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256));
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
@@ -36,6 +37,10 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
+ case TLS_CIPHER_AES_GCM_256:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256);
+ break;
}
return false;
@@ -56,10 +61,7 @@ static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
-}
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 27483aa7be8a..3e54834747ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx {
};
struct mlx5e_ktls_offload_context_rx {
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct accel_rule rule;
struct sock *sk;
struct mlx5e_rq_stats *rq_stats;
@@ -111,7 +111,7 @@ static void accel_rule_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
- rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
+ rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
mlx5e_tir_get_tirn(&priv_rx->tir),
MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
@@ -362,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
bool trigger_poll;
@@ -373,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
spin_lock_bh(&ktls_resync->lock);
spin_lock_bh(&priv_rx->lock);
- memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+ switch (priv_rx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &priv_rx->crypto_info.crypto_info_128;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &priv_rx->crypto_info.crypto_info_256;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_rx->crypto_info.crypto_info.cipher_type);
+ spin_unlock_bh(&priv_rx->lock);
+ spin_unlock_bh(&ktls_resync->lock);
+ return;
+ }
+
if (list_empty(&priv_rx->list)) {
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
@@ -461,6 +484,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct net_device *netdev = rq->netdev;
+ struct net *net = dev_net(netdev);
struct sock *sk = NULL;
unsigned int datalen;
struct iphdr *iph;
@@ -475,7 +499,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
@@ -485,7 +509,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
@@ -603,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
- priv_rx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_rx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_rx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
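
With the switch above, mlx5e_ktls_add_rx() accepts AES-256-GCM contexts,
which userspace requests through the regular kTLS socket API. A minimal
sketch (error handling omitted, key material left as placeholders):

    #include <linux/tls.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    struct tls12_crypto_info_aes_gcm_256 ci = {
            .info.version = TLS_1_2_VERSION,
            .info.cipher_type = TLS_CIPHER_AES_GCM_256,
            /* .key, .iv, .salt, .rec_seq come from the TLS handshake */
    };

    setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
    setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));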
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 3a1f76eac542..2e0335246967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -93,7 +93,7 @@ struct mlx5e_ktls_offload_context_tx {
bool ctx_post_pending;
/* control / resync */
struct list_head list_node; /* member of the pool */
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats;
@@ -485,8 +485,20 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
goto err_create_key;
priv_tx->expected_seq = start_offload_tcp_sn;
- priv_tx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_tx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_tx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -671,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
- rec_seq = info->rec_seq;
- rec_seq_sz = sizeof(info->rec_seq);
+ switch (priv_tx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_tx->crypto_info.crypto_info.cipher_type);
+ return;
+ }
skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
if (!skip_static_post)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
index ac29aeb8af49..570a912dd6fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
@@ -21,7 +21,7 @@ enum {
static void
fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 key_id, u32 resync_tcp_sn)
{
char *initial_rn, *gcm_iv;
@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
ctx = params->ctx;
- EXTRACT_INFO_FIELDS;
+ switch (crypto_info->crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &crypto_info->crypto_info_128;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &crypto_info->crypto_info_256;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->crypto_info.cipher_type);
+ return;
+ }
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction)
{
@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
- fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);
+ fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index 0dc715c4c10d..3d79cd379890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -27,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);
+union mlx5e_crypto_info {
+ struct tls_crypto_info crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 crypto_info_128;
+ struct tls12_crypto_info_aes_gcm_256 crypto_info_256;
+};
+
struct mlx5e_set_tls_static_params_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
@@ -72,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe {
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction);
void
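
The union works because every tls12_crypto_info_aes_gcm_* layout begins with
the generic struct tls_crypto_info header, so the cipher type written through
one member can be read back through .crypto_info:

    union mlx5e_crypto_info ci;

    ci.crypto_info_256 = *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;

    switch (ci.crypto_info.cipher_type) { /* aliases ci.crypto_info_256.info */
    case TLS_CIPHER_AES_GCM_128: /* ... */ break;
    case TLS_CIPHER_AES_GCM_256: /* ... */ break;
    }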
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
new file mode 100644
index 000000000000..5da746da898d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -0,0 +1,1870 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/xarray.h>
+
+#include "en.h"
+#include "lib/aso.h"
+#include "lib/mlx5.h"
+#include "en_accel/macsec.h"
+#include "en_accel/macsec_fs.h"
+
+#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
+#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
+
+enum mlx5_macsec_aso_event_arm {
+ MLX5E_ASO_EPN_ARM = BIT(0),
+};
+
+enum {
+ MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
+struct mlx5e_macsec_handle {
+ struct mlx5e_macsec *macsec;
+ u32 obj_id;
+ u8 idx;
+};
+
+enum {
+ MLX5_MACSEC_EPN,
+};
+
+struct mlx5e_macsec_aso_out {
+ u8 event_arm;
+ u32 mode_param;
+};
+
+struct mlx5e_macsec_aso_in {
+ u8 mode;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_epn_state {
+ u32 epn_msb;
+ u8 epn_enabled;
+ u8 overlap;
+};
+
+struct mlx5e_macsec_async_work {
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ struct work_struct work;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_sa {
+ bool active;
+ u8 assoc_num;
+ u32 macsec_obj_id;
+ u32 enc_key_id;
+ u32 next_pn;
+ sci_t sci;
+ salt_t salt;
+
+ struct rhash_head hash;
+ u32 fs_id;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct rcu_head rcu_head;
+ struct mlx5e_macsec_epn_state epn_state;
+};
+
+struct mlx5e_macsec_rx_sc;
+struct mlx5e_macsec_rx_sc_xarray_element {
+ u32 fs_id;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+};
+
+struct mlx5e_macsec_rx_sc {
+ bool active;
+ sci_t sci;
+ struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
+ struct list_head rx_sc_list_element;
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct metadata_dst *md_dst;
+ struct rcu_head rcu_head;
+};
+
+struct mlx5e_macsec_umr {
+ dma_addr_t dma_addr;
+ u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ u32 mkey;
+};
+
+struct mlx5e_macsec_aso {
+ /* ASO */
+ struct mlx5_aso *maso;
+ /* Protects macsec ASO */
+ struct mutex aso_lock;
+ /* UMR */
+ struct mlx5e_macsec_umr *umr;
+
+ u32 pdn;
+};
+
+static const struct rhashtable_params rhash_sci = {
+ .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
+ .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
+ .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+struct mlx5e_macsec_device {
+ const struct net_device *netdev;
+ struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
+ struct list_head macsec_rx_sc_list_head;
+ unsigned char *dev_addr;
+ struct list_head macsec_device_list_element;
+};
+
+struct mlx5e_macsec {
+ struct list_head macsec_device_list_head;
+ int num_of_devices;
+ struct mlx5e_macsec_fs *macsec_fs;
+ struct mutex lock; /* Protects mlx5e_macsec internal contexts */
+
+ /* Tx sci -> fs id mapping handling */
+ struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
+
+ /* Rx fs_id -> rx_sc mapping */
+ struct xarray sc_xarray;
+
+ struct mlx5_core_dev *mdev;
+
+ /* Stats manage */
+ struct mlx5e_macsec_stats stats;
+
+ /* ASO */
+ struct mlx5e_macsec_aso aso;
+
+ struct notifier_block nb;
+ struct workqueue_struct *wq;
+};
+
+struct mlx5_macsec_obj_attrs {
+ u32 aso_pdn;
+ u32 next_pn;
+ __be64 sci;
+ u32 enc_key_id;
+ bool encrypt;
+ struct mlx5e_macsec_epn_state epn_state;
+ salt_t salt;
+ __be32 ssci;
+ bool replay_protect;
+ u32 replay_window;
+};
+
+struct mlx5_aso_ctrl_param {
+ u8 data_mask_mode;
+ u8 condition_0_operand;
+ u8 condition_1_operand;
+ u8 condition_0_offset;
+ u8 condition_1_offset;
+ u8 data_offset;
+ u8 condition_operand;
+ u32 condition_0_data;
+ u32 condition_0_mask;
+ u32 condition_1_data;
+ u32 condition_1_mask;
+ u64 bitwise_data;
+ u64 data_mask;
+};
+
+static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr;
+ struct device *dma_device;
+ dma_addr_t dma_addr;
+ int err;
+
+ umr = kzalloc(sizeof(*umr), GFP_KERNEL);
+ if (!umr) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ dma_device = &mdev->pdev->dev;
+ dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(dma_device, dma_addr);
+ if (err) {
+ mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
+ goto out_dma;
+ }
+
+ err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
+ goto out_mkey;
+ }
+
+ umr->dma_addr = dma_addr;
+
+ aso->umr = umr;
+
+ return 0;
+
+out_mkey:
+ dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+out_dma:
+ kfree(umr);
+ return err;
+}
+
+static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr = aso->umr;
+
+ mlx5_core_destroy_mkey(mdev, umr->mkey);
+ dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ kfree(umr);
+}
+
+static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
+{
+ u8 window_sz;
+
+ if (!attrs->replay_protect)
+ return 0;
+
+ switch (attrs->replay_window) {
+ case 256:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ case 128:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 64:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 32:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
+
+ return 0;
+}
+
+static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_obj_attrs *attrs,
+ bool is_tx,
+ u32 *macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *aso_ctx;
+ void *obj;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
+ aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
+
+ MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
+ MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
+ MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
+ MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
+ MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
+
+ /* Epn */
+ if (attrs->epn_state.epn_enabled) {
+ void *salt_p;
+ int i;
+
+ MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
+ salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
+ for (i = 0; i < 3; i++)
+ memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
+ } else {
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
+ }
+
+ MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
+ if (is_tx) {
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
+ } else {
+ err = macsec_set_replay_protection(attrs, aso_ctx);
+ if (err)
+ return err;
+ }
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to create MACsec object (err = %d)\n",
+ err);
+ return err;
+ }
+
+ *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa,
+ bool is_tx)
+{
+ int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ if ((is_tx) && sa->fs_id) {
+ /* Make sure ongoing datapath readers see a valid SA */
+ rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ sa->fs_id = 0;
+ }
+
+ if (!sa->macsec_rule)
+ return;
+
+ mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+ sa->macsec_rule = NULL;
+}
+
+static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa,
+ bool encrypt,
+ bool is_tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_macsec_obj_attrs obj_attrs;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct macsec_key *key;
+ int err;
+
+ obj_attrs.next_pn = sa->next_pn;
+ obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
+ obj_attrs.enc_key_id = sa->enc_key_id;
+ obj_attrs.encrypt = encrypt;
+ obj_attrs.aso_pdn = macsec->aso.pdn;
+ obj_attrs.epn_state = sa->epn_state;
+
+ if (is_tx) {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
+ key = &ctx->sa.tx_sa->key;
+ } else {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+ key = &ctx->sa.rx_sa->key;
+ }
+
+ memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+ obj_attrs.replay_window = ctx->secy->replay_window;
+ obj_attrs.replay_protect = ctx->secy->replay_protect;
+
+ err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
+ if (!macsec_rule) {
+ err = -ENOMEM;
+ goto destroy_macsec_object;
+ }
+
+ sa->macsec_rule = macsec_rule;
+
+ if (is_tx) {
+ err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ if (err)
+ goto destroy_macsec_object_and_rule;
+ }
+
+ return 0;
+
+destroy_macsec_object_and_rule:
+ mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
+destroy_macsec_object:
+ mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
+
+ return err;
+}
+
+static struct mlx5e_macsec_rx_sc *
+mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
+{
+ struct mlx5e_macsec_rx_sc *iter;
+
+ list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
+ if (iter->sci == sci)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *rx_sa,
+ bool active)
+{
+ struct mlx5_core_dev *mdev = macsec->mdev;
+ struct mlx5_macsec_obj_attrs attrs = {};
+ int err = 0;
+
+ if (rx_sa->active == active)
+ return 0;
+
+ rx_sa->active = active;
+ if (!active) {
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ return 0;
+ }
+
+ attrs.sci = cpu_to_be64((__force u64)rx_sa->sci);
+ attrs.enc_key_id = rx_sa->enc_key_id;
+ err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
+{
+ const struct net_device *netdev = ctx->netdev;
+ const struct macsec_secy *secy = ctx->secy;
+
+ if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when validate_frame is in strict mode\n");
+ return false;
+ }
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
+ netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
+ MACSEC_DEFAULT_ICV_LEN);
+ return false;
+ }
+
+ if (!secy->protect_frames) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when protect_frames is set\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5e_macsec_device *
+mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
+ const struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_device *iter;
+ const struct list_head *list;
+
+ list = &macsec->macsec_device_list_head;
+ list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
+ if (iter->netdev == ctx->secy->netdev)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
+ const pn_t *next_pn_halves)
+{
+ struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
+
+ sa->salt = key->salt;
+ epn_state->epn_enabled = 1;
+ epn_state->epn_msb = next_pn_halves->upper;
+ epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
+}
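+
+/* Example: with MLX5_MACSEC_EPN_SCOPE_MID = 0x80000000, next_pn_halves of
+ * { .upper = 5, .lower = 0x7fffffff } yields epn_msb = 5 and overlap = 0,
+ * while .lower = 0x80000000 yields overlap = 1.
+ */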
+
+static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EEXIST;
+ goto out;
+ }
+
+ if (macsec_device->tx_sa[assoc_num]) {
+  netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exists\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
+ if (!tx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+ tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
+ tx_sa->sci = secy->sci;
+ tx_sa->assoc_num = assoc_num;
+
+ if (secy->xpn)
+ update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &tx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ macsec_device->tx_sa[assoc_num] = tx_sa;
+ if (!secy->operational ||
+ assoc_num != tx_sc->encoding_sa ||
+ !tx_sa->active)
+ goto out;
+
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto destroy_encryption_key;
+
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+destroy_encryption_key:
+ macsec_device->tx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
+destroy_sa:
+ kfree(tx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct net_device *netdev;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ netdev = ctx->netdev;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+ netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (tx_sa->active == ctx_tx_sa->active)
+ goto out;
+
+ if (tx_sa->assoc_num != tx_sc->encoding_sa)
+ goto out;
+
+ if (ctx_tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ } else {
+ if (!tx_sa->macsec_rule) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree_rcu(tx_sa);
+ macsec_device->tx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
+{
+ struct mlx5e_macsec_sa *macsec_sa;
+ u32 fs_id = 0;
+
+ rcu_read_lock();
+ macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
+ if (macsec_sa)
+ fs_id = macsec_sa->fs_id;
+ rcu_read_unlock();
+
+ return fs_id;
+}
+
+static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct list_head *rx_sc_list;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
+ if (rx_sc) {
+ netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
+ ctx_rx_sc->sci);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
+ if (!rx_sc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
+ if (!sc_xarray_element) {
+ err = -ENOMEM;
+ goto destroy_rx_sc;
+ }
+
+ sc_xarray_element->rx_sc = rx_sc;
+ err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
+ XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
+ if (err)
+ goto destroy_sc_xarray_elemenet;
+
+ rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!rx_sc->md_dst) {
+ err = -ENOMEM;
+ goto erase_xa_alloc;
+ }
+
+ rx_sc->sci = ctx_rx_sc->sci;
+ rx_sc->active = ctx_rx_sc->active;
+ list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
+
+ rx_sc->sc_xarray_element = sc_xarray_element;
+ rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+erase_xa_alloc:
+ xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
+destroy_sc_xarray_elemenet:
+ kfree(sc_xarray_element);
+destroy_rx_sc:
+ kfree(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int i;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
+ if (!rx_sc) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sc->active == ctx_rx_sc->active)
+  goto out;
+
+ rx_sc->active = ctx_rx_sc->active;
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+      ctx->rx_sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+/*
+ * At this point the relevant MACsec offload Rx rule already removed at
+ * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
+ * Rx related data propagating using xa_erase which uses rcu to sync,
+ * once fs_id is erased then this rx_sc is hidden from datapath.
+ */
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+ xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
+ metadata_dst_free(rx_sc->md_dst);
+ kfree(rx_sc->sc_xarray_element);
+
+ kfree_rcu(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sc->rx_sa[assoc_num]) {
+ netdev_err(ctx->netdev,
+      "MACsec offload rx_sc sci %lld rx_sa %d already exists\n",
+ sci, assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
+ if (!rx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rx_sa->active = ctx_rx_sa->active;
+ rx_sa->next_pn = ctx_rx_sa->next_pn;
+ rx_sa->sci = sci;
+ rx_sa->assoc_num = assoc_num;
+ rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
+
+ if (ctx->secy->xpn)
+ update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &rx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ rx_sc->rx_sa[assoc_num] = rx_sa;
+ if (!rx_sa->active)
+ goto out;
+
+ //TODO - add support for both authentication and encryption flows
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ if (err)
+ goto destroy_encryption_key;
+
+ goto out;
+
+destroy_encryption_key:
+ rx_sc->rx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
+destroy_sa:
+ kfree(rx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+			   sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+	rx_sa = rx_sc->rx_sa[assoc_num];
+	if (!rx_sa) {
+		netdev_err(ctx->netdev,
+			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+			   sci, assoc_num);
+		err = -EINVAL;
+		goto out;
+	}
+
+ if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+ netdev_err(ctx->netdev,
+ "MACsec offload update RX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ sci_t sci = ctx->sa.rx_sa->sc->sci;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+			   sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+	rx_sa = rx_sc->rx_sa[assoc_num];
+	if (!rx_sa) {
+		netdev_err(ctx->netdev,
+			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+			   sci, assoc_num);
+		err = -EINVAL;
+		goto out;
+	}
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ const struct net_device *netdev = ctx->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
+		netdev_err(netdev, "MACsec offload: MACsec net_device already exists\n");
+		err = -EEXIST;
+		goto out;
+	}
+
+ if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
+ netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
+ err = -EBUSY;
+ goto out;
+ }
+
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
+ if (!macsec_device->dev_addr) {
+ kfree(macsec_device);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->netdev = dev;
+
+ INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
+ list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
+
+ ++macsec->num_of_devices;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
+ struct mlx5e_macsec_device *macsec_device)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct list_head *list;
+ int i, err = 0;
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa || !rx_sa->macsec_rule)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ }
+ }
+
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ if (rx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+ memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
+out:
+ return err;
+}
+
+/* This function is called from two MACsec ops functions:
+ * macsec_set_mac_address: the MAC address was changed, so we need to destroy
+ * and create new Tx contexts (MACsec object + steering).
+ * macsec_changelink: the Tx SC or SecY may have changed, so we need to
+ * destroy and create new Tx and Rx contexts (MACsec object + steering).
+ */
+static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int i, err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+	/* If the dev_addr hasn't changed, the callback came from
+	 * macsec_changelink; otherwise the MAC address changed and the Rx
+	 * rules must be re-created with the new address.
+	 */
+	if (memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
+ err = macsec_upd_secy_hw_address(ctx, macsec_device);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+		err = -EINVAL;
+		goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree(tx_sa);
+ macsec_device->tx_sa[i] = NULL;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+
+ kfree_rcu(rx_sc);
+ }
+
+ kfree(macsec_device->dev_addr);
+ macsec_device->dev_addr = NULL;
+
+ list_del_rcu(&macsec_device->macsec_device_list_element);
+ --macsec->num_of_devices;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
+ struct mlx5_macsec_obj_attrs *attrs)
+{
+ attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
+ attrs->epn_state.overlap = sa->epn_state.overlap;
+}
+
+static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5_aso_ctrl_param *param)
+{
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ if (macsec_aso->umr->dma_addr) {
+ aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
+ aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
+ aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
+ }
+
+ if (!param)
+ return;
+
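+	/* condition_1_0_operand and condition_1_0_offset each pack two 4-bit
+	 * values, entry 1 in the low nibble and entry 0 in the high nibble;
+	 * data_offset_condition_operand packs a 6-bit data offset with a
+	 * 2-bit condition operand.
+	 */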
+ aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
+ aso_ctrl->condition_1_0_operand = param->condition_1_operand |
+ param->condition_0_operand << 4;
+ aso_ctrl->condition_1_0_offset = param->condition_1_offset |
+ param->condition_0_offset << 4;
+ aso_ctrl->data_offset_condition_operand = param->data_offset |
+ param->condition_operand << 6;
+ aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
+ aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
+ aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
+ aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
+ aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
+ aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
+}
+
+static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
+ u32 macsec_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
+ u64 modify_field_select = 0;
+ void *obj;
+ int err;
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
+ macsec_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
+ modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
+
+ /* EPN */
+ if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
+ mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
+ macsec_id);
+ return -EOPNOTSUPP;
+ }
+
+ obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
+ MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5_aso_ctrl_param param = {};
+
+ param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
+ param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
+ param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
+ if (in->mode == MLX5_MACSEC_EPN) {
+ param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ param.bitwise_data = BIT_ULL(54);
+ param.data_mask = param.bitwise_data;
+ }
+ macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
+}
+
+static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false, 10);
+ mutex_unlock(&aso->aso_lock);
+
+ return err;
+}
+
+static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
+
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false, 10);
+ if (err)
+ goto err_out;
+
+ if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
+ out->event_arm |= MLX5E_ASO_EPN_ARM;
+
+ out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
+
+err_out:
+ mutex_unlock(&aso->aso_lock);
+ return err;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = iter->tx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list, *sc_list;
+ struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ sc_list = &iter->macsec_rx_sc_list_head;
+ list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = mlx5e_rx_sc->rx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
+ struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
+{
+ struct mlx5_macsec_obj_attrs attrs = {};
+ struct mlx5e_macsec_aso_in in = {};
+
+	/* When the bottom of the replay protection window (mode_param) crosses
+	 * 2^31 (half sequence-number wraparound), mode_param becomes greater
+	 * than MLX5_MACSEC_EPN_SCOPE_MID and SW must set epn_overlap to
+	 * OLD (1).
+	 * When it crosses 2^32 (full sequence-number wraparound), mode_param
+	 * wraps and becomes smaller than MLX5_MACSEC_EPN_SCOPE_MID, so SW must
+	 * set epn_overlap back to NEW (0) and increment epn_msb.
+	 */
+
+ if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
+ sa->epn_state.epn_msb++;
+ sa->epn_state.overlap = 0;
+ } else {
+ sa->epn_state.overlap = 1;
+ }
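+	/* Illustrative example (assuming MLX5_MACSEC_EPN_SCOPE_MID marks the
+	 * 2^31 midpoint): a mode_param of 0x90000000 has only crossed 2^31,
+	 * so just epn_overlap is set; a later mode_param of 0x00000010 means
+	 * the window bottom wrapped past 2^32, so epn_msb is incremented and
+	 * epn_overlap is cleared.
+	 */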
+
+ macsec_build_accel_attrs(sa, &attrs);
+ mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
+
+ /* Re-set EPN arm event */
+ in.obj_id = obj_id;
+ in.mode = MLX5_MACSEC_EPN;
+ macsec_aso_set_arm_event(mdev, macsec, &in);
+}
+
+static void macsec_async_event(struct work_struct *work)
+{
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5e_macsec_aso_out out = {};
+ struct mlx5e_macsec_aso_in in = {};
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+
+ async_work = container_of(work, struct mlx5e_macsec_async_work, work);
+ macsec = async_work->macsec;
+ mdev = async_work->mdev;
+ obj_id = async_work->obj_id;
+ macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
+ goto out_async_work;
+ }
+ }
+
+ /* Query MACsec ASO context */
+ in.obj_id = obj_id;
+ macsec_aso_query(mdev, macsec, &in, &out);
+
+ /* EPN case */
+ if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
+ macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
+
+out_async_work:
+ kfree(async_work);
+}
+
+static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5_eqe_obj_change *obj_change;
+ struct mlx5_eqe *eqe = data;
+ u16 obj_type;
+ u32 obj_id;
+
+ if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+ return NOTIFY_DONE;
+
+ obj_change = &eqe->data.obj_change;
+ obj_type = be16_to_cpu(obj_change->obj_type);
+ obj_id = be32_to_cpu(obj_change->obj_id);
+
+ if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
+ return NOTIFY_DONE;
+
+ async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
+ if (!async_work)
+ return NOTIFY_DONE;
+
+ async_work->macsec = macsec;
+ async_work->mdev = macsec->mdev;
+ async_work->obj_id = obj_id;
+
+ INIT_WORK(&async_work->work, macsec_async_event);
+
+ WARN_ON(!queue_work(macsec->wq, &async_work->work));
+
+ return NOTIFY_OK;
+}
+
+static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ struct mlx5_aso *maso;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &aso->pdn);
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
+ err);
+ return err;
+ }
+
+ maso = mlx5_aso_create(mdev, aso->pdn);
+ if (IS_ERR(maso)) {
+ err = PTR_ERR(maso);
+ goto err_aso;
+ }
+
+ err = mlx5e_macsec_aso_reg_mr(mdev, aso);
+ if (err)
+ goto err_aso_reg;
+
+ mutex_init(&aso->aso_lock);
+
+ aso->maso = maso;
+
+ return 0;
+
+err_aso_reg:
+ mlx5_aso_destroy(maso);
+err_aso:
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+ return err;
+}
+
+static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ if (!aso)
+ return;
+
+ mlx5e_macsec_aso_dereg_mr(mdev, aso);
+
+ mlx5_aso_destroy(aso->maso);
+
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+}
+
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
+{
+ mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
+}
+
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
+{
+ if (!macsec)
+ return NULL;
+
+ return &macsec->stats;
+}
+
+static const struct macsec_ops macsec_offload_ops = {
+ .mdo_add_txsa = mlx5e_macsec_add_txsa,
+ .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
+ .mdo_del_txsa = mlx5e_macsec_del_txsa,
+ .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
+ .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
+ .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
+ .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
+ .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
+ .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
+ .mdo_add_secy = mlx5e_macsec_add_secy,
+ .mdo_upd_secy = mlx5e_macsec_upd_secy,
+ .mdo_del_secy = mlx5e_macsec_del_secy,
+};
+
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ goto err_out;
+
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
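+/* The Tx WQE eth segment metadata carries the MLX5_ETH_WQE_FT_META_MACSEC
+ * marker together with the SA's fs_id (fs_id << 2); the Tx steering rules
+ * recover fs_id by matching metadata_reg_a under
+ * MLX5_ETH_WQE_FT_META_MACSEC_MASK (see macsec_fs_tx_setup_fte()).
+ */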
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ return;
+
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec *macsec;
+ u32 fs_id;
+
+ macsec = priv->macsec;
+ if (!macsec)
+ return;
+
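+	/* fs_id was written into the CQE flow-table metadata by the Rx
+	 * steering rule's modify-header action when the packet was decrypted.
+	 */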
+ fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data);
+
+ rcu_read_lock();
+ sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
+ rx_sc = sc_xarray_element->rx_sc;
+ if (rx_sc) {
+ dst_hold(&rx_sc->md_dst->dst);
+ skb_dst_set(skb, &rx_sc->md_dst->dst);
+ }
+
+ rcu_read_unlock();
+}
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return;
+
+ /* Enable MACsec */
+ mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
+ netdev->macsec_ops = &macsec_offload_ops;
+ netdev->features |= NETIF_F_HW_MACSEC;
+ netif_keep_dst(netdev);
+}
+
+int mlx5e_macsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_macsec *macsec = NULL;
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ if (!mlx5e_is_macsec_device(priv->mdev)) {
+ mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
+ return 0;
+ }
+
+ macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
+ if (!macsec)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&macsec->macsec_device_list_head);
+ mutex_init(&macsec->lock);
+
+ err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
+ goto err_aso;
+ }
+
+ macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
+ if (!macsec->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
+
+ priv->macsec = macsec;
+
+ macsec->mdev = mdev;
+
+ macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
+ if (!macsec_fs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ macsec->macsec_fs = macsec_fs;
+
+ macsec->nb.notifier_call = macsec_obj_change_event;
+ mlx5_notifier_register(mdev, &macsec->nb);
+
+ mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
+
+ return 0;
+
+err_out:
+ destroy_workqueue(macsec->wq);
+err_wq:
+ mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
+err_aso:
+ rhashtable_destroy(&macsec->sci_hash);
+err_hash:
+ kfree(macsec);
+ priv->macsec = NULL;
+ return err;
+}
+
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
+{
+	struct mlx5e_macsec *macsec = priv->macsec;
+	struct mlx5_core_dev *mdev;
+
+	if (!macsec)
+		return;
+
+	mdev = macsec->mdev;
+
+ mlx5_notifier_unregister(mdev, &macsec->nb);
+
+ mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
+
+ /* Cleanup workqueue */
+ destroy_workqueue(macsec->wq);
+
+ mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
+
+ priv->macsec = NULL;
+
+ rhashtable_destroy(&macsec->sci_hash);
+
+ mutex_destroy(&macsec->lock);
+
+ kfree(macsec);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
new file mode 100644
index 000000000000..d580b4a91253
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_ACCEL_MACSEC_H__
+#define __MLX5_EN_ACCEL_MACSEC_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include <linux/mlx5/driver.h>
+#include <net/macsec.h>
+#include <net/dst_metadata.h>
+
+/* Bit31 - 30: MACsec marker, Bit3-0: MACsec id */
+#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
+#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0))
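+/* e.g. metadata 0x40000005 carries the MACsec marker (0x1 in bits 31:30)
+ * and MACsec id/handle 5 (bits 3:0).
+ */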
+
+struct mlx5e_priv;
+struct mlx5e_macsec;
+
+struct mlx5e_macsec_stats {
+ u64 macsec_rx_pkts;
+ u64 macsec_rx_bytes;
+ u64 macsec_rx_pkts_drop;
+ u64 macsec_rx_bytes_drop;
+ u64 macsec_tx_pkts;
+ u64 macsec_tx_bytes;
+ u64 macsec_tx_pkts_drop;
+ u64 macsec_tx_bytes_drop;
+};
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_macsec_init(struct mlx5e_priv *priv);
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb);
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ return md_dst && (md_dst->type == METADATA_MACSEC);
+}
+
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
+
+#else
+
+static inline void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) {}
+static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {}
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; }
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{}
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_EN_ACCEL_MACSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
new file mode 100644
index 000000000000..13dc628b988a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/macsec.h>
+#include <linux/netdevice.h>
+#include <linux/mlx5/qp.h>
+#include "fs_core.h"
+#include "en/fs.h"
+#include "en_accel/macsec_fs.h"
+#include "mlx5_core.h"
+
+/* MACsec TX flow steering */
+#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
+#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
+
+#define TX_CRYPTO_TABLE_LEVEL 0
+#define TX_CRYPTO_TABLE_NUM_GROUPS 3
+#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
+#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
+ CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
+#define TX_CHECK_TABLE_LEVEL 1
+#define TX_CHECK_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_LEVEL 0
+#define RX_CHECK_TABLE_LEVEL 1
+#define RX_CHECK_TABLE_NUM_FTE 3
+#define RX_CRYPTO_TABLE_NUM_GROUPS 3
+#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
+ ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
+#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
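+/* Each Rx SA installs two steering rules: one matching a SecTAG that carries
+ * the SCI and one matching a SecTAG without it (see the with/without SCI
+ * flow groups below).
+ */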
+#define RX_NUM_OF_RULES_PER_SA 2
+
+#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
+#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
+#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
+
+/* MACsec Tx flow steering: WQE metadata match mask */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
+
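+/* SecTAG layout used to build the packet reformat data: MLX5_MACSEC_TAG_LEN
+ * (8) covers ethertype (2) + tci_an (1) + sl (1) + pn (4); the optional
+ * 8-byte SCI follows when TCI.SC is set.
+ */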
+struct mlx5_sectag_header {
+ __be16 ethertype;
+ u8 tci_an;
+ u8 sl;
+ u32 pn;
+ u8 sci[MACSEC_SCI_LEN]; /* optional */
+} __packed;
+
+struct mlx5e_macsec_tx_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u32 fs_id;
+};
+
+struct mlx5e_macsec_tables {
+ struct mlx5e_flow_table ft_crypto;
+ struct mlx5_flow_handle *crypto_miss_rule;
+
+ struct mlx5_flow_table *ft_check;
+ struct mlx5_flow_group *ft_check_group;
+ struct mlx5_fc *check_miss_rule_counter;
+ struct mlx5_flow_handle *check_miss_rule;
+ struct mlx5_fc *check_rule_counter;
+
+ u32 refcnt;
+};
+
+struct mlx5e_macsec_tx {
+ struct mlx5_flow_handle *crypto_mke_rule;
+ struct mlx5_flow_handle *check_rule;
+
+ struct ida tx_halloc;
+
+ struct mlx5e_macsec_tables tables;
+};
+
+struct mlx5e_macsec_rx_rule {
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5e_macsec_rx {
+ struct mlx5_flow_handle *check_rule[2];
+ struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
+
+ struct mlx5e_macsec_tables tables;
+};
+
+union mlx5e_macsec_rule {
+ struct mlx5e_macsec_tx_rule tx_rule;
+ struct mlx5e_macsec_rx_rule rx_rule;
+};
+
+struct mlx5e_macsec_fs {
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5e_macsec_rx *rx_fs;
+};
+
+static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ tx_tables = &tx_fs->tables;
+
+ /* Tx check table */
+ if (tx_fs->check_rule) {
+ mlx5_del_flow_rules(tx_fs->check_rule);
+ tx_fs->check_rule = NULL;
+ }
+
+ if (tx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->check_miss_rule);
+ tx_tables->check_miss_rule = NULL;
+ }
+
+ if (tx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(tx_tables->ft_check_group);
+ tx_tables->ft_check_group = NULL;
+ }
+
+ if (tx_tables->ft_check) {
+ mlx5_destroy_flow_table(tx_tables->ft_check);
+ tx_tables->ft_check = NULL;
+ }
+
+ /* Tx crypto table */
+ if (tx_fs->crypto_mke_rule) {
+ mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
+ tx_fs->crypto_mke_rule = NULL;
+ }
+
+ if (tx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
+ tx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&tx_tables->ft_crypto);
+}
+
+static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow Group for MKE match */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for SA rules */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static struct mlx5_flow_table *
+macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
+				  int level, int max_fte)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb = NULL;
+
+ /* reserve entry for the match all miss group and rule */
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = 0;
+ ft_attr.flags = flags;
+ ft_attr.level = level;
+ ft_attr.max_fte = max_fte;
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+
+ return fdb;
+}
+
+static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!flow_group_in) {
+		err = -ENOMEM;
+		goto out_spec;
+	}
+
+ tx_tables = &tx_fs->tables;
+ ft_crypto = &tx_tables->ft_crypto;
+
+ /* Tx crypto table */
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Tx crypto table groups */
+ err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+			   "Failed to create flow groups for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
+ memset(&flow_act, 0, sizeof(flow_act));
+ memset(spec, 0, sizeof(*spec));
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->crypto_mke_rule = rule;
+
+ /* Tx crypto table Default miss rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
+ goto err;
+ }
+ tx_tables->crypto_miss_rule = rule;
+
+ /* Tx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
+ TX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+		netdev_err(netdev, "Failed to create MACsec Tx check table, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->ft_check = flow_table;
+
+ /* Tx check table Default miss group/rule */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+			   "Failed to create default flow group for MACsec Tx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ tx_tables->ft_check_group = flow_group;
+
+ /* Tx check table default drop rule */
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+		netdev_err(netdev, "Failed to add MACsec Tx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->check_miss_rule = rule;
+
+ /* Tx check table rule */
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
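+	/* Match metadata_reg_c_4 == 0, i.e. hardware returned no offload
+	 * syndrome for the packet (mirroring the syndrome matches in the Rx
+	 * check rules); anything else falls through to the drop rule above.
+	 */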
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->check_rule = rule;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_tx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
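+/* The Tx tables are created lazily when the first rule is added and destroyed
+ * when the last rule is removed. refcnt itself takes no lock here; the mdo
+ * callbacks that add and remove rules run under the MACsec mutex, which
+ * serializes get/put.
+ */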
+static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_tx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ tx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+
+ if (--tx_tables->refcnt)
+ return;
+
+ macsec_fs_tx_destroy(macsec_fs);
+}
+
+static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ u32 macsec_obj_id,
+ u32 *fs_id)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ int err = 0;
+ u32 id;
+
+ err = ida_alloc_range(&tx_fs->tx_halloc, 1,
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ id = err;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Metadata match */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC | id << 2);
+
+ *fs_id = id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ flow_act->crypto.obj_id = macsec_obj_id;
+
+ mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
+ return 0;
+}
+
+static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
+ char *reformatbf,
+ size_t *reformat_size)
+{
+ const struct macsec_secy *secy = ctx->secy;
+ bool sci_present = macsec_send_sci(secy);
+ struct mlx5_sectag_header sectag = {};
+ const struct macsec_tx_sc *tx_sc;
+
+ tx_sc = &secy->tx_sc;
+ sectag.ethertype = htons(ETH_P_MACSEC);
+
+ if (sci_present) {
+ sectag.tci_an |= MACSEC_TCI_SC;
+ memcpy(&sectag.sci, &secy->sci,
+ sizeof(sectag.sci));
+ } else {
+ if (tx_sc->end_station)
+ sectag.tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ sectag.tci_an |= MACSEC_TCI_SCB;
+ }
+
+ /* With GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ sectag.tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ sectag.tci_an |= MACSEC_TCI_C;
+
+ sectag.tci_an |= tx_sc->encoding_sa;
+
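+	/* Illustrative example: an encrypting SecY that sends the SCI with
+	 * encoding_sa 0 yields ethertype 0x88e5 (ETH_P_MACSEC) and tci_an
+	 * 0x2c (TCI_SC | TCI_E | TCI_C), followed by the 8-byte SCI.
+	 */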
+ *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+
+ memcpy(reformatbf, &sectag, *reformat_size);
+}
+
+static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_tx_rule *tx_rule)
+{
+ if (tx_rule->rule) {
+ mlx5_del_flow_rules(tx_rule->rule);
+ tx_rule->rule = NULL;
+ }
+
+ if (tx_rule->pkt_reformat) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
+ tx_rule->pkt_reformat = NULL;
+ }
+
+ if (tx_rule->fs_id) {
+ ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
+ tx_rule->fs_id = 0;
+ }
+
+ kfree(tx_rule);
+
+ macsec_fs_tx_ft_put(macsec_fs);
+}
+
+static union mlx5e_macsec_rule *
+macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ size_t reformat_size;
+ int err = 0;
+ u32 fs_id;
+
+ tx_tables = &tx_fs->tables;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_tx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_tx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ tx_rule = &macsec_rule->tx_rule;
+
+ /* Tx crypto table crypto rule */
+ macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
+ reformat_params.size = reformat_size;
+ reformat_params.data = reformatbf;
+ flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (IS_ERR(flow_act.pkt_reformat)) {
+ err = PTR_ERR(flow_act.pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
+ goto err;
+ }
+ tx_rule->pkt_reformat = flow_act.pkt_reformat;
+
+ err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+
+ tx_rule->fs_id = fs_id;
+ *sa_fs_id = fs_id;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx_tables->ft_check;
+ rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
+ goto err;
+ }
+ tx_rule->rule = rule;
+
+ goto out_spec;
+
+err:
+ macsec_fs_tx_del_rule(macsec_fs, tx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+
+ return macsec_rule;
+}
+
+static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ if (!tx_fs)
+ return;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
+ tx_tables->refcnt);
+ return;
+ }
+
+ ida_destroy(&tx_fs->tx_halloc);
+
+ if (tx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
+ tx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (tx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+}
+
+static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ tx_tables = &tx_fs->tables;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+ tx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ tx_tables->check_miss_rule_counter = flow_counter;
+
+ ida_init(&tx_fs->tx_halloc);
+
+ macsec_fs->tx_fs = tx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5e_macsec_tables *rx_tables;
+ int i;
+
+ /* Rx check table */
+ for (i = 1; i >= 0; --i) {
+ if (rx_fs->check_rule[i]) {
+ mlx5_del_flow_rules(rx_fs->check_rule[i]);
+ rx_fs->check_rule[i] = NULL;
+ }
+
+ if (rx_fs->check_rule_pkt_reformat[i]) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev,
+ rx_fs->check_rule_pkt_reformat[i]);
+ rx_fs->check_rule_pkt_reformat[i] = NULL;
+ }
+ }
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->check_miss_rule);
+ rx_tables->check_miss_rule = NULL;
+ }
+
+ if (rx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(rx_tables->ft_check_group);
+ rx_tables->ft_check_group = NULL;
+ }
+
+ if (rx_tables->ft_check) {
+ mlx5_destroy_flow_table(rx_tables->ft_check);
+ rx_tables->ft_check = NULL;
+ }
+
+ /* Rx crypto table */
+ if (rx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
+ rx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&rx_tables->ft_crypto);
+}
+
+static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow group for SA rule with SCI */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow group for SA rule without SCI */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec,
+ int reformat_param_size)
+{
+ int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
+ u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ rx_tables = &rx_fs->tables;
+
+ /* Rx check table decap 16B rule */
+ memset(dest, 0, sizeof(*dest));
+ memset(flow_act, 0, sizeof(*flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
+ reformat_params.size = reformat_param_size;
+ reformat_params.data = mlx5_reformat_buf;
+ flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (IS_ERR(flow_act->pkt_reformat)) {
+ err = PTR_ERR(flow_act->pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
+ return err;
+ }
+ rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ /* MACsec syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
+ /* ASO return reg syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+	/* SecTAG TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ flow_act->flags = FLOW_ACT_NO_APPEND;
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
+ return err;
+ }
+
+ rx_fs->check_rule[rule_index] = rule;
+
+ return 0;
+}
+
+static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!flow_group_in) {
+		err = -ENOMEM;
+		goto free_spec;
+	}
+
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Rx crypto table */
+ ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Rx crypto table groups */
+ err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+			   "Failed to create flow groups for MACsec Rx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add MACsec Rx crypto table default miss rule %d\n",
+ err);
+ goto err;
+ }
+ rx_tables->crypto_miss_rule = rule;
+
+ /* Rx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns,
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
+ RX_CHECK_TABLE_LEVEL,
+ RX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+		netdev_err(netdev, "Failed to create MACsec Rx check table, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->ft_check = flow_table;
+
+ /* Rx check table Default miss group/rule */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Rx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ rx_tables->ft_check_group = flow_group;
+
+ /* Rx check table default drop rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+		netdev_err(netdev, "Failed to add MACsec Rx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->check_miss_rule = rule;
+
+ /* Rx check table decap rules */
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
+ if (err)
+ goto err;
+
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
+ if (err)
+ goto err;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_rx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+free_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ int err = 0;
+
+ if (rx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_rx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ rx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+
+ if (--rx_tables->refcnt)
+ return;
+
+ macsec_fs_rx_destroy(macsec_fs);
+}
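The two helpers above implement a plain refcount around table creation: the first get builds the shared Rx tables, the last put destroys them, and a failed create leaves the refcount at zero. A minimal standalone sketch of the same pattern, where create_tables()/destroy_tables() are hypothetical stand-ins for macsec_fs_rx_create()/macsec_fs_rx_destroy():

struct shared_tables {
	unsigned int refcnt;
};

static int tables_get(struct shared_tables *t)
{
	if (!t->refcnt) {
		int err = create_tables(t);	/* hypothetical stand-in */

		if (err)
			return err;		/* refcnt stays 0 on failure */
	}
	t->refcnt++;
	return 0;
}

static void tables_put(struct shared_tables *t)
{
	if (--t->refcnt)
		return;				/* tables still have users */
	destroy_tables(t);			/* hypothetical stand-in */
}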
+
+static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_rx_rule *rx_rule)
+{
+ int i;
+
+ for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
+ if (rx_rule->rule[i]) {
+ mlx5_del_flow_rules(rx_rule->rule[i]);
+ rx_rule->rule[i] = NULL;
+ }
+ }
+
+ if (rx_rule->meta_modhdr) {
+ mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
+ rx_rule->meta_modhdr = NULL;
+ }
+
+ kfree(rx_rule);
+
+ macsec_fs_rx_ft_put(macsec_fs);
+}
+
+static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_macsec_rule_attrs *attrs,
+ bool sci_present)
+{
+ u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
+ struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
+ __be32 *sci_p = (__be32 *)(&attrs->sci);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* MACsec ethertype */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+	/* SecTAG AN + TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (sci_present) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
+ be32_to_cpu(sci_p[0]));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_3);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
+ be32_to_cpu(sci_p[1]));
+ } else {
+		/* When the SCI isn't present in the SecTAG, match on the source
+		 * MAC address only if the SCI contains the default MACsec PORT
+		 */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
+ sci_p, ETH_ALEN);
+ }
+
+ crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ crypto_params->obj_id = attrs->macsec_obj_id;
+}
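The misc_parameters_5.macsec_tag_0 match above keys on the first SecTAG byte, whose layout is fixed by IEEE 802.1AE: the V, ES, SC, SCB, E and C flags in bits 7-2 and the 2-bit association number in bits 1-0. A small self-contained sketch of how the tci_an value is composed; the bit position is the standard's, and it is assumed (not verified here) to equal MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET:

#include <stdint.h>
#include <stdio.h>

/* 802.1AE SecTAG TCI byte: V(7) ES(6) SC(5) SCB(4) E(3) C(2) AN(1:0) */
#define SECTAG_TCI_SC_BIT	5

static uint8_t sectag_tci_an(int sci_present, uint8_t assoc_num)
{
	/* same composition as the tci_an local in macsec_fs_rx_setup_fte() */
	return (uint8_t)((!!sci_present << SECTAG_TCI_SC_BIT) | (assoc_num & 0x3));
}

int main(void)
{
	printf("0x%02x\n", sectag_tci_an(1, 2));	/* SC set, AN=2 -> 0x22 */
	return 0;
}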
+
+static union mlx5e_macsec_rule *
+macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 fs_id)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_rx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_rx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ rx_rule = &macsec_rule->rx_rule;
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+	/* Set bits 31-30 to the MACsec marker (0b01) and bits 3-0 to the fs id */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, fs_id | BIT(30));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+		netdev_err(netdev, "Failed to alloc MACsec Rx modify_header_id, err=%d\n", err);
+ modify_hdr = NULL;
+ goto err;
+ }
+ rx_rule->meta_modhdr = modify_hdr;
+
+ /* Rx crypto table with SCI rule */
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[0] = rule;
+
+	/* Rx crypto table without SCI rule; the SecTAG may omit the SCI only
+	 * when the SCI carries the default end-station port (ES), so the rule
+	 * is needed just for such SAs.
+	 */
+	if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) {
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[1] = rule;
+ }
+
+ return macsec_rule;
+
+err:
+ macsec_fs_rx_del_rule(macsec_fs, rx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+ return macsec_rule;
+}
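The modify-header action programmed in macsec_fs_rx_add_rule() stamps metadata register B with fs_id | BIT(30), so the Rx datapath can tell MACsec-decrypted packets apart and recover the SA's fs id. A standalone sketch of that encoding under the bit layout described in the comment above; this is purely illustrative, not the driver's actual Rx handler:

#include <stdint.h>
#include <stdio.h>

#define MACSEC_MARKER_BIT	30	/* "macsec marker" bit from the comment above */
#define MACSEC_FS_ID_MASK	0xfU	/* bits 3-0 carry the fs id */

static uint32_t macsec_md_encode(uint32_t fs_id)
{
	return (fs_id & MACSEC_FS_ID_MASK) | (1U << MACSEC_MARKER_BIT);
}

int main(void)
{
	uint32_t md = macsec_md_encode(5);

	printf("macsec=%d fs_id=%u\n",
	       !!(md & (1U << MACSEC_MARKER_BIT)), md & MACSEC_FS_ID_MASK);
	return 0;
}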
+
+static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx *rx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
+ if (!rx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+		netdev_err(netdev,
+			   "Failed to create MACsec Rx check flow counter, err(%d)\n",
+			   err);
+		goto err_check_counter;
+ }
+
+ rx_tables = &rx_fs->tables;
+ rx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ rx_tables->check_miss_rule_counter = flow_counter;
+
+ macsec_fs->rx_fs = rx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+
+err_check_counter:
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+
+ if (!rx_fs)
+ return;
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
+ rx_tables->refcnt);
+ return;
+ }
+
+ if (rx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
+ rx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (rx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+}
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
+{
+ struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+
+ if (tx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_rule_counter,
+ &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
+
+ if (tx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
+ &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
+
+ if (rx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_rule_counter,
+ &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
+
+ if (rx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
+ &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
+}
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
+ macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+}
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action)
+{
+ (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
+ macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
+}
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ kfree(macsec_fs);
+}
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+ macsec_fs->netdev = netdev;
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto err;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+		netdev_err(netdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+err:
+ kfree(macsec_fs);
+ return NULL;
+}
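Taken together, the file's exported API boils down to init/add_rule/del_rule/cleanup. A hedged sketch of the expected call order using the functions added above; this is not a real call site, the example_* name is invented, and error handling is abbreviated:

static int example_macsec_fs_usage(struct mlx5_core_dev *mdev,
				   struct net_device *netdev)
{
	struct mlx5_macsec_rule_attrs attrs = {
		.action = MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
		/* .sci, .macsec_obj_id and .assoc_num come from the SA */
	};
	union mlx5e_macsec_rule *rule;
	struct mlx5e_macsec_fs *fs;
	u32 fs_id = 0;

	fs = mlx5e_macsec_fs_init(mdev, netdev);	/* allocates context + counters */
	if (!fs)
		return -ENOMEM;

	/* NULL stands in for the macsec_context; the Rx path above
	 * does not dereference it. Tables are built lazily here.
	 */
	rule = mlx5e_macsec_fs_add_rule(fs, NULL, &attrs, &fs_id);
	if (rule)
		mlx5e_macsec_fs_del_rule(fs, rule, attrs.action);

	mlx5e_macsec_fs_cleanup(fs);			/* tears down both directions */
	return 0;
}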
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
new file mode 100644
index 000000000000..b429648d4ee7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_MACSEC_STEERING_H__
+#define __MLX5_MACSEC_STEERING_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include "en_accel/macsec.h"
+
+#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
+
+struct mlx5e_macsec_fs;
+union mlx5e_macsec_rule;
+
+struct mlx5_macsec_rule_attrs {
+ sci_t sci;
+ u32 macsec_obj_id;
+ u8 assoc_num;
+ int action;
+};
+
+enum mlx5_macsec_action {
+ MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
+};
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id);
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action);
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
+
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_MACSEC_STEERING_H__ */
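For context, mlx5_macsec_rule_attrs is what the MACsec offload glue (the macsec.c part of this series) is expected to populate per SA before calling mlx5e_macsec_fs_add_rule(). A hedged sketch of an Rx-side fill, with the field sources assumed rather than quoted from that file:

static void example_fill_rx_attrs(struct mlx5_macsec_rule_attrs *attrs,
				  sci_t sci, u32 macsec_obj_id, u8 assoc_num)
{
	attrs->sci = sci;			/* SCI to match in the SecTAG */
	attrs->macsec_obj_id = macsec_obj_id;	/* HW crypto object handle */
	attrs->assoc_num = assoc_num;		/* 2-bit association number */
	attrs->action = MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
}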
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
new file mode 100644
index 000000000000..e50a2e3f3d18
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "en_accel/macsec.h"
+
+static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+};
+
+#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(macsec_hw)
+{
+ if (!priv->macsec)
+ return 0;
+
+ if (mlx5e_is_macsec_device(priv->mdev))
+ return NUM_MACSEC_HW_COUNTERS;
+
+ return 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(macsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
+{
+ unsigned int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_macsec_hw_stats_desc[i].format);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
+{
+ int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+ mlx5e_macsec_hw_stats_desc,
+ i);
+
+ return idx;
+}
+
+MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
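MLX5E_DEFINE_STATS_GRP() only defines the group; to be reported through ethtool it still has to appear in the driver's stats-group list. A hedged sketch of that hookup, assuming the mlx5e convention in en_stats.c (not part of this file):

/* en_stats.c (sketch): */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	/* ... existing groups ... */
	&MLX5E_STATS_GRP(macsec_hw),
};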