-rw-r--r--  drivers/infiniband/core/cache.c                                  |    6
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile                              |    1
-rw-r--r--  drivers/infiniband/hw/mlx5/macsec.c                              |  364
-rw-r--r--  drivers/infiniband/hw/mlx5/macsec.h                              |   29
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                                |   41
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h                             |   17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig                  |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile                 |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                     |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h      |    4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c        |  176
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h        |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c     | 1394
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h     |   47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c  |   22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c               |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c                 |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c                |   37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c          | 2410
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h          |   64
-rw-r--r--  drivers/net/macsec.c                                             |   12
-rw-r--r--  include/linux/mlx5/device.h                                      |    2
-rw-r--r--  include/linux/mlx5/driver.h                                      |   52
-rw-r--r--  include/linux/mlx5/fs.h                                          |    2
-rw-r--r--  include/linux/mlx5/macsec.h                                      |   32
-rw-r--r--  include/net/macsec.h                                             |    2
26 files changed, 3118 insertions, 1631 deletions
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 2e91d8879326..73f913cbd146 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -400,6 +400,9 @@ static void del_gid(struct ib_device *ib_dev, u32 port,
table->data_vec[ix] = NULL;
write_unlock_irq(&table->rwlock);
+ if (rdma_cap_roce_gid_table(ib_dev, port))
+ ib_dev->ops.del_gid(&entry->attr, &entry->context);
+
ndev_storage = entry->ndev_storage;
if (ndev_storage) {
entry->ndev_storage = NULL;
@@ -407,9 +410,6 @@ static void del_gid(struct ib_device *ib_dev, u32 port,
call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
}
- if (rdma_cap_roce_gid_table(ib_dev, port))
- ib_dev->ops.del_gid(&entry->attr, &entry->context);
-
put_gid_entry_locked(entry);
}
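The hunk above moves the vendor del_gid callback ahead of the ndev_storage release: a driver's del_gid implementation may still need the entry's netdev (the mlx5 MACsec teardown added later in this series dereferences attr->ndev), so it must run while that pointer is still populated. A minimal sketch of such a callback, assuming the standard ib_device ops signature; the function below is illustrative and not part of this series:

static int example_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);	/* still valid thanks to the reordering */
	if (ndev && netif_is_macsec(ndev))
		; /* per-GID MACsec teardown would happen here */
	rcu_read_unlock();

	return 0;
}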
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 612ee8190a2d..72a526236c2e 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -28,3 +28,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
fs.o \
qos.o \
std_types.o
+mlx5_ib-$(CONFIG_MLX5_MACSEC) += macsec.o
diff --git a/drivers/infiniband/hw/mlx5/macsec.c b/drivers/infiniband/hw/mlx5/macsec.c
new file mode 100644
index 000000000000..3c56eb5eddf3
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/macsec.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "macsec.h"
+#include <linux/mlx5/macsec.h>
+
+struct mlx5_reserved_gids {
+ int macsec_index;
+ const struct ib_gid_attr *physical_gid;
+};
+
+struct mlx5_roce_gids {
+ struct list_head roce_gid_list_entry;
+ u16 gid_idx;
+ union {
+ struct sockaddr_in sockaddr_in;
+ struct sockaddr_in6 sockaddr_in6;
+ } addr;
+};
+
+struct mlx5_macsec_device {
+ struct list_head macsec_devices_list_entry;
+ void *macdev;
+ struct list_head macsec_roce_gids;
+ struct list_head tx_rules_list;
+ struct list_head rx_rules_list;
+};
+
+static void cleanup_macsec_device(struct mlx5_macsec_device *macsec_device)
+{
+ if (!list_empty(&macsec_device->tx_rules_list) ||
+ !list_empty(&macsec_device->rx_rules_list) ||
+ !list_empty(&macsec_device->macsec_roce_gids))
+ return;
+
+ list_del(&macsec_device->macsec_devices_list_entry);
+ kfree(macsec_device);
+}
+
+static struct mlx5_macsec_device *get_macsec_device(void *macdev,
+ struct list_head *macsec_devices_list)
+{
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+
+ list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (macsec_device)
+ return macsec_device;
+
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device)
+ return NULL;
+
+ macsec_device->macdev = macdev;
+ INIT_LIST_HEAD(&macsec_device->tx_rules_list);
+ INIT_LIST_HEAD(&macsec_device->rx_rules_list);
+ INIT_LIST_HEAD(&macsec_device->macsec_roce_gids);
+ list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
+
+ return macsec_device;
+}
+
+static void mlx5_macsec_del_roce_gid(struct mlx5_macsec_device *macsec_device, u16 gid_idx)
+{
+ struct mlx5_roce_gids *current_gid, *next_gid;
+
+ list_for_each_entry_safe(current_gid, next_gid, &macsec_device->macsec_roce_gids,
+ roce_gid_list_entry)
+ if (current_gid->gid_idx == gid_idx) {
+ list_del(&current_gid->roce_gid_list_entry);
+ kfree(current_gid);
+ }
+}
+
+static void mlx5_macsec_save_roce_gid(struct mlx5_macsec_device *macsec_device,
+ const struct sockaddr *addr, u16 gid_idx)
+{
+ struct mlx5_roce_gids *roce_gids;
+
+ roce_gids = kzalloc(sizeof(*roce_gids), GFP_KERNEL);
+ if (!roce_gids)
+ return;
+
+ roce_gids->gid_idx = gid_idx;
+ if (addr->sa_family == AF_INET)
+ memcpy(&roce_gids->addr.sockaddr_in, addr, sizeof(roce_gids->addr.sockaddr_in));
+ else
+ memcpy(&roce_gids->addr.sockaddr_in6, addr, sizeof(roce_gids->addr.sockaddr_in6));
+
+ list_add_tail(&roce_gids->roce_gid_list_entry, &macsec_device->macsec_roce_gids);
+}
+
+static void handle_macsec_gids(struct list_head *macsec_devices_list,
+ struct mlx5_macsec_event_data *data)
+{
+ struct mlx5_macsec_device *macsec_device;
+ struct mlx5_roce_gids *gid;
+
+ macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
+ if (!macsec_device)
+ return;
+
+ list_for_each_entry(gid, &macsec_device->macsec_roce_gids, roce_gid_list_entry) {
+ mlx5_macsec_add_roce_sa_rules(data->fs_id, (struct sockaddr *)&gid->addr,
+ gid->gid_idx, &macsec_device->tx_rules_list,
+ &macsec_device->rx_rules_list, data->macsec_fs,
+ data->is_tx);
+ }
+}
+
+static void del_sa_roce_rule(struct list_head *macsec_devices_list,
+ struct mlx5_macsec_event_data *data)
+{
+ struct mlx5_macsec_device *macsec_device;
+
+ macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
+ WARN_ON(!macsec_device);
+
+ mlx5_macsec_del_roce_sa_rules(data->fs_id, data->macsec_fs,
+ &macsec_device->tx_rules_list,
+ &macsec_device->rx_rules_list, data->is_tx);
+}
+
+static int macsec_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_macsec *macsec = container_of(nb, struct mlx5_macsec, blocking_events_nb);
+
+ mutex_lock(&macsec->lock);
+ switch (event) {
+ case MLX5_DRIVER_EVENT_MACSEC_SA_ADDED:
+ handle_macsec_gids(&macsec->macsec_devices_list, data);
+ break;
+ case MLX5_DRIVER_EVENT_MACSEC_SA_DELETED:
+ del_sa_roce_rule(&macsec->macsec_devices_list, data);
+ break;
+ default:
+ mutex_unlock(&macsec->lock);
+ return NOTIFY_DONE;
+ }
+ mutex_unlock(&macsec->lock);
+ return NOTIFY_OK;
+}
+
+void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return;
+ }
+
+ dev->macsec.blocking_events_nb.notifier_call = macsec_event;
+ blocking_notifier_chain_register(&dev->mdev->macsec_nh,
+ &dev->macsec.blocking_events_nb);
+}
+
+void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return;
+ }
+
+ blocking_notifier_chain_unregister(&dev->mdev->macsec_nh,
+ &dev->macsec.blocking_events_nb);
+}
+
+int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev)
+{
+ int i, j, max_gids;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return 0;
+ }
+
+ max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->port[i].reserved_gids = kcalloc(max_gids,
+ sizeof(*dev->port[i].reserved_gids),
+ GFP_KERNEL);
+ if (!dev->port[i].reserved_gids)
+ goto err;
+
+ for (j = 0; j < max_gids; j++)
+ dev->port[i].reserved_gids[j].macsec_index = -1;
+ }
+
+ INIT_LIST_HEAD(&dev->macsec.macsec_devices_list);
+ mutex_init(&dev->macsec.lock);
+
+ return 0;
+err:
+ while (i >= 0) {
+ kfree(dev->port[i].reserved_gids);
+ i--;
+ }
+ return -ENOMEM;
+}
+
+void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev)
+{
+ int i;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev))
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+
+ for (i = 0; i < dev->num_ports; i++)
+ kfree(dev->port[i].reserved_gids);
+
+ mutex_destroy(&dev->macsec.lock);
+}
+
+int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(attr->device);
+ struct mlx5_macsec_device *macsec_device;
+ const struct ib_gid_attr *physical_gid;
+ struct mlx5_reserved_gids *mgids;
+ struct net_device *ndev;
+ int ret = 0;
+ union {
+ struct sockaddr_in sockaddr_in;
+ struct sockaddr_in6 sockaddr_in6;
+ } addr;
+
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return 0;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return 0;
+ }
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+
+ if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ dev_hold(ndev);
+ rcu_read_unlock();
+
+ mutex_lock(&dev->macsec.lock);
+ macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
+ if (!macsec_device) {
+ ret = -ENOMEM;
+ goto dev_err;
+ }
+
+ physical_gid = rdma_find_gid(attr->device, &attr->gid,
+ attr->gid_type, NULL);
+ if (!IS_ERR(physical_gid)) {
+ ret = set_roce_addr(to_mdev(physical_gid->device),
+ physical_gid->port_num,
+ physical_gid->index, NULL,
+ physical_gid);
+ if (ret)
+ goto gid_err;
+
+ mgids = &dev->port[attr->port_num - 1].reserved_gids[physical_gid->index];
+ mgids->macsec_index = attr->index;
+ mgids->physical_gid = physical_gid;
+ }
+
+ /* Proceed with adding steering rules, regardless of whether there was gid ambiguity. */
+ rdma_gid2ip((struct sockaddr *)&addr, &attr->gid);
+ ret = mlx5_macsec_add_roce_rule(ndev, (struct sockaddr *)&addr, attr->index,
+ &macsec_device->tx_rules_list,
+ &macsec_device->rx_rules_list, dev->mdev->macsec_fs);
+ if (ret && !IS_ERR(physical_gid))
+ goto rule_err;
+
+ mlx5_macsec_save_roce_gid(macsec_device, (struct sockaddr *)&addr, attr->index);
+
+ dev_put(ndev);
+ mutex_unlock(&dev->macsec.lock);
+ return ret;
+
+rule_err:
+ set_roce_addr(to_mdev(physical_gid->device), physical_gid->port_num,
+ physical_gid->index, &physical_gid->gid, physical_gid);
+ mgids->macsec_index = -1;
+gid_err:
+ rdma_put_gid_attr(physical_gid);
+ cleanup_macsec_device(macsec_device);
+dev_err:
+ dev_put(ndev);
+ mutex_unlock(&dev->macsec.lock);
+ return ret;
+}
+
+void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(attr->device);
+ struct mlx5_macsec_device *macsec_device;
+ struct mlx5_reserved_gids *mgids;
+ struct net_device *ndev;
+ int i, max_gids;
+
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return;
+
+ if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
+ return;
+ }
+
+ mgids = &dev->port[attr->port_num - 1].reserved_gids[attr->index];
+ if (mgids->macsec_index != -1) { /* Checking if physical gid has ambiguous IP */
+ rdma_put_gid_attr(mgids->physical_gid);
+ mgids->macsec_index = -1;
+ return;
+ }
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
+ rcu_read_unlock();
+ return;
+ }
+ dev_hold(ndev);
+ rcu_read_unlock();
+
+ mutex_lock(&dev->macsec.lock);
+ max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
+ for (i = 0; i < max_gids; i++) { /* Checking if macsec gid has ambiguous IP */
+ mgids = &dev->port[attr->port_num - 1].reserved_gids[i];
+ if (mgids->macsec_index == attr->index) {
+ const struct ib_gid_attr *physical_gid = mgids->physical_gid;
+
+ set_roce_addr(to_mdev(physical_gid->device),
+ physical_gid->port_num,
+ physical_gid->index,
+ &physical_gid->gid, physical_gid);
+
+ rdma_put_gid_attr(physical_gid);
+ mgids->macsec_index = -1;
+ break;
+ }
+ }
+ macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
+ mlx5_macsec_del_roce_rule(attr->index, dev->mdev->macsec_fs,
+ &macsec_device->tx_rules_list, &macsec_device->rx_rules_list);
+ mlx5_macsec_del_roce_gid(macsec_device, attr->index);
+ cleanup_macsec_device(macsec_device);
+
+ dev_put(ndev);
+ mutex_unlock(&dev->macsec.lock);
+}
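macsec_event() above consumes MLX5_DRIVER_EVENT_MACSEC_SA_ADDED/_DELETED from the core device's blocking notifier chain. A sketch of the producing side, assuming the mlx5_macsec_event_data layout implied by the handlers; this call site is illustrative only (the real one lives in the ethernet MACsec code changed later in this series):

	struct mlx5_macsec_event_data data = {
		.macsec_fs = mdev->macsec_fs,
		.macdev    = ctx->secy->netdev,
		.fs_id     = fs_id,
		.is_tx     = true,
	};

	/* Invokes macsec_event() registered via mlx5r_macsec_event_register() */
	blocking_notifier_call_chain(&mdev->macsec_nh,
				     MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, &data);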
diff --git a/drivers/infiniband/hw/mlx5/macsec.h b/drivers/infiniband/hw/mlx5/macsec.h
new file mode 100644
index 000000000000..9b77ba90f0f4
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/macsec.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_MACSEC_H__
+#define __MLX5_MACSEC_H__
+
+#include <net/macsec.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+#include "mlx5_ib.h"
+
+#ifdef CONFIG_MLX5_MACSEC
+struct mlx5_reserved_gids;
+
+int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr);
+void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr);
+int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev);
+void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev);
+void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev);
+void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev);
+#else
+static inline int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr) { return 0; }
+static inline void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr) {}
+static inline int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev) { return 0; }
+static inline void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev) {}
+static inline void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev) {}
+static inline void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev) {}
+#endif
+#endif /* __MLX5_MACSEC_H__ */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3c25b9045f9d..aed5cdea50e6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,6 +46,7 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include "macsec.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -564,9 +565,9 @@ out:
return err;
}
-static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
- unsigned int index, const union ib_gid *gid,
- const struct ib_gid_attr *attr)
+int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
{
enum ib_gid_type gid_type;
u16 vlan_id = 0xffff;
@@ -607,6 +608,12 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
+ int ret;
+
+ ret = mlx5r_add_gid_macsec_operations(attr);
+ if (ret)
+ return ret;
+
return set_roce_addr(to_mdev(attr->device), attr->port_num,
attr->index, &attr->gid, attr);
}
@@ -614,8 +621,15 @@ static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
- return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, NULL, attr);
+ int ret;
+
+ ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
+ attr->index, NULL, attr);
+ if (ret)
+ return ret;
+
+ mlx5r_del_gid_macsec_operations(attr);
+ return 0;
}
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
@@ -3644,13 +3658,13 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
mutex_destroy(&dev->cap_mask_mutex);
WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+ mlx5r_macsec_dealloc_gids(dev);
}
static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- int err;
- int i;
+ int err, i;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -3670,10 +3684,14 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
if (err)
return err;
- err = mlx5_ib_init_multiport_master(dev);
+ err = mlx5r_macsec_init_gids_and_devlist(dev);
if (err)
return err;
+ err = mlx5_ib_init_multiport_master(dev);
+ if (err)
+ goto err;
+
err = set_has_smi_cap(dev);
if (err)
goto err_mp;
@@ -3697,7 +3715,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
return 0;
-
+err:
+ mlx5r_macsec_dealloc_gids(dev);
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
return err;
@@ -4106,11 +4125,15 @@ static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
{
dev->mdev_events.notifier_call = mlx5_ib_event;
mlx5_notifier_register(dev->mdev, &dev->mdev_events);
+
+ mlx5r_macsec_event_register(dev);
+
return 0;
}
static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
{
+ mlx5r_macsec_event_unregister(dev);
mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 9c33d960af3c..16713baf0d06 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -26,6 +26,7 @@
#include "srq.h"
#include "qp.h"
+#include "macsec.h"
#define mlx5_ib_dbg(_dev, format, arg...) \
dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
@@ -870,6 +871,9 @@ struct mlx5_ib_port {
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
struct mlx5_roce roce;
struct mlx5_eswitch_rep *rep;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_reserved_gids *reserved_gids;
+#endif
};
struct mlx5_ib_dbg_param {
@@ -1086,6 +1090,12 @@ struct mlx5_special_mkeys {
__be32 terminate_scatter_list_mkey;
};
+struct mlx5_macsec {
+ struct mutex lock; /* Protects mlx5_macsec internal contexts */
+ struct list_head macsec_devices_list;
+ struct notifier_block blocking_events_nb;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
@@ -1145,6 +1155,10 @@ struct mlx5_ib_dev {
u16 pkey_table_len;
u8 lag_ports;
struct mlx5_special_mkeys mkeys;
+
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec macsec;
+#endif
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1648,4 +1662,7 @@ static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
return access_flags & IB_ACCESS_RELAXED_ORDERING;
}
+int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr);
#endif /* MLX5_IB_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index d537d517cabe..c4f4de82e29e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -140,7 +140,7 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_EN_MACSEC
+config MLX5_MACSEC
bool "Connect-X support for MACSec offload"
depends on MLX5_CORE_EN
depends on MACSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 946310390659..b56b187a9097 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -98,7 +98,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
-mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
+mlx5_core-$(CONFIG_MLX5_MACSEC) += en_accel/macsec.o lib/macsec_fs.o \
en_accel/macsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c1deb04ba7e8..86f2690c5e01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -917,7 +917,7 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
struct mlx5e_macsec *macsec;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index bac4717548c6..caa34b9c161e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -138,7 +138,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -173,7 +173,7 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 592b165530ff..c9c1db971652 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -10,7 +10,6 @@
#include "lib/aso.h"
#include "lib/crypto.h"
#include "en_accel/macsec.h"
-#include "en_accel/macsec_fs.h"
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
@@ -66,9 +65,7 @@ struct mlx5e_macsec_sa {
ssci_t ssci;
salt_t salt;
- struct rhash_head hash;
- u32 fs_id;
- union mlx5e_macsec_rule *macsec_rule;
+ union mlx5_macsec_rule *macsec_rule;
struct rcu_head rcu_head;
struct mlx5e_macsec_epn_state epn_state;
};
@@ -106,14 +103,6 @@ struct mlx5e_macsec_aso {
u32 pdn;
};
-static const struct rhashtable_params rhash_sci = {
- .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
- .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
- .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
- .automatic_shrinking = true,
- .min_size = 1,
-};
-
struct mlx5e_macsec_device {
const struct net_device *netdev;
struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
@@ -125,20 +114,13 @@ struct mlx5e_macsec_device {
struct mlx5e_macsec {
struct list_head macsec_device_list_head;
int num_of_devices;
- struct mlx5e_macsec_fs *macsec_fs;
struct mutex lock; /* Protects mlx5e_macsec internal contexts */
- /* Tx sci -> fs id mapping handling */
- struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
-
/* Rx fs_id -> rx_sc mapping */
struct xarray sc_xarray;
struct mlx5_core_dev *mdev;
- /* Stats manage */
- struct mlx5e_macsec_stats stats;
-
/* ASO */
struct mlx5e_macsec_aso aso;
@@ -330,36 +312,30 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
struct mlx5e_macsec_sa *sa,
- bool is_tx)
+ bool is_tx, struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
- if ((is_tx) && sa->fs_id) {
- /* Make sure ongoing datapath readers sees a valid SA */
- rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
- sa->fs_id = 0;
- }
-
if (!sa->macsec_rule)
return;
- mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
+ mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
+ fs_id);
mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
- bool encrypt,
- bool is_tx)
+ bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5e_macsec_rule *macsec_rule;
+ union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -387,7 +363,7 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
- macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
+ macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
if (!macsec_rule) {
err = -ENOMEM;
goto destroy_macsec_object;
@@ -395,16 +371,8 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
sa->macsec_rule = macsec_rule;
- if (is_tx) {
- err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
- if (err)
- goto destroy_macsec_object_and_rule;
- }
-
return 0;
-destroy_macsec_object_and_rule:
- mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
destroy_macsec_object:
mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
@@ -426,7 +394,7 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
static int macsec_rx_sa_active_update(struct macsec_context *ctx,
struct mlx5e_macsec_sa *rx_sa,
- bool active)
+ bool active, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
@@ -437,11 +405,11 @@ static int macsec_rx_sa_active_update(struct macsec_context *ctx,
rx_sa->active = active;
if (!active) {
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);
return 0;
}
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
if (err)
rx_sa->active = false;
@@ -563,7 +531,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
!tx_sa->active)
goto out;
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto destroy_encryption_key;
@@ -627,7 +595,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -636,7 +604,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -669,7 +637,7 @@ static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
kfree_rcu_mightsleep(tx_sa);
macsec_device->tx_sa[assoc_num] = NULL;
@@ -680,20 +648,6 @@ out:
return err;
}
-static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
-{
- struct mlx5e_macsec_sa *macsec_sa;
- u32 fs_id = 0;
-
- rcu_read_lock();
- macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
- if (macsec_sa)
- fs_id = macsec_sa->fs_id;
- rcu_read_unlock();
-
- return fs_id;
-}
-
static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
@@ -813,7 +767,8 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
if (!rx_sa)
continue;
- err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -824,7 +779,8 @@ out:
return err;
}
-static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
+static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
+ struct net_device *netdev)
{
struct mlx5e_macsec_sa *rx_sa;
int i;
@@ -834,7 +790,8 @@ static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec
if (!rx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
@@ -882,7 +839,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
goto out;
}
- macsec_del_rxsc_ctx(macsec, rx_sc);
+ macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
out:
mutex_unlock(&macsec->lock);
@@ -941,7 +898,6 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
rx_sa->next_pn = ctx_rx_sa->next_pn;
rx_sa->sci = sci;
rx_sa->assoc_num = assoc_num;
- rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
if (ctx->secy->xpn)
update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
@@ -958,7 +914,7 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
goto out;
//TODO - add support for both authentication and encryption flows
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
if (err)
goto destroy_encryption_key;
@@ -1025,7 +981,8 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out;
}
- err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
+ &rx_sc->sc_xarray_element->fs_id);
out:
mutex_unlock(&macsec->lock);
@@ -1073,7 +1030,8 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1154,7 +1112,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1165,7 +1124,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1218,7 +1178,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1227,7 +1187,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
@@ -1265,7 +1225,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
kfree(tx_sa);
macsec_device->tx_sa[i] = NULL;
@@ -1273,7 +1233,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
list = &macsec_device->macsec_rx_sc_list_head;
list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
- macsec_del_rxsc_ctx(macsec, rx_sc);
+ macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
kfree(macsec_device->dev_addr);
macsec_device->dev_addr = NULL;
@@ -1647,50 +1607,6 @@ static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_c
mlx5_core_dealloc_pd(mdev, aso->pdn);
}
-bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
-{
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
- return false;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
- return false;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
- return false;
-
- if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
- !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
- return false;
-
- if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
- !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
- return false;
-
- return true;
-}
-
-void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
-{
- mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
-}
-
-struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
-{
- if (!macsec)
- return NULL;
-
- return &macsec->stats;
-}
-
static const struct macsec_ops macsec_offload_ops = {
.mdo_add_txsa = mlx5e_macsec_add_txsa,
.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
@@ -1711,7 +1627,8 @@ bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb
struct metadata_dst *md_dst = skb_metadata_dst(skb);
u32 fs_id;
- fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
+ &md_dst->u.macsec_info.sci);
if (!fs_id)
goto err_out;
@@ -1729,7 +1646,8 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
struct metadata_dst *md_dst = skb_metadata_dst(skb);
u32 fs_id;
- fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
+ &md_dst->u.macsec_info.sci);
if (!fs_id)
return;
@@ -1782,7 +1700,7 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_macsec *macsec = NULL;
- struct mlx5e_macsec_fs *macsec_fs;
+ struct mlx5_macsec_fs *macsec_fs;
int err;
if (!mlx5e_is_macsec_device(priv->mdev)) {
@@ -1797,13 +1715,6 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
INIT_LIST_HEAD(&macsec->macsec_device_list_head);
mutex_init(&macsec->lock);
- err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
- if (err) {
- mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
- err);
- goto err_hash;
- }
-
err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
if (err) {
mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
@@ -1822,13 +1733,13 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
macsec->mdev = mdev;
- macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
+ macsec_fs = mlx5_macsec_fs_init(mdev);
if (!macsec_fs) {
err = -ENOMEM;
goto err_out;
}
- macsec->macsec_fs = macsec_fs;
+ mdev->macsec_fs = macsec_fs;
macsec->nb.notifier_call = macsec_obj_change_event;
mlx5_notifier_register(mdev, &macsec->nb);
@@ -1842,8 +1753,6 @@ err_out:
err_wq:
mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
- rhashtable_destroy(&macsec->sci_hash);
-err_hash:
kfree(macsec);
priv->macsec = NULL;
return err;
@@ -1858,10 +1767,9 @@ void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5_notifier_unregister(mdev, &macsec->nb);
- mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
+ mlx5_macsec_fs_cleanup(mdev->macsec_fs);
destroy_workqueue(macsec->wq);
mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
- rhashtable_destroy(&macsec->sci_hash);
mutex_destroy(&macsec->lock);
kfree(macsec);
}
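With the sci hash gone from this file, mlx5_macsec_fs_get_fs_id_from_hashtable() in lib/macsec_fs.c is expected to perform the equivalent RCU lookup. A sketch reconstructed from the code deleted above, assuming the lib side keeps the same rhash_sci parameters keyed on the SA's sci field (struct and member names on the new side are assumptions):

static u32 example_fs_id_lookup(struct mlx5_macsec_fs *macsec_fs, sci_t *sci)
{
	struct mlx5_macsec_sa *macsec_sa;
	u32 fs_id = 0;

	rcu_read_lock();
	macsec_sa = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci);
	if (macsec_sa)
		fs_id = macsec_sa->fs_id;
	rcu_read_unlock();

	return fs_id;
}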
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
index 347380a2cd9c..27df72e23106 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -4,32 +4,16 @@
#ifndef __MLX5_EN_ACCEL_MACSEC_H__
#define __MLX5_EN_ACCEL_MACSEC_H__
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
#include <linux/mlx5/driver.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
-
-/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */
-#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
-#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
-#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
-#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+#include "lib/macsec_fs.h"
struct mlx5e_priv;
struct mlx5e_macsec;
-struct mlx5e_macsec_stats {
- u64 macsec_rx_pkts;
- u64 macsec_rx_bytes;
- u64 macsec_rx_pkts_drop;
- u64 macsec_rx_bytes_drop;
- u64 macsec_tx_pkts;
- u64 macsec_tx_bytes;
- u64 macsec_tx_pkts_drop;
- u64 macsec_tx_bytes_drop;
-};
-
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
int mlx5e_macsec_init(struct mlx5e_priv *priv);
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
@@ -52,9 +36,6 @@ static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
-bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
-void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
-struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
#else
@@ -67,7 +48,6 @@ static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{}
-static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
-#endif /* CONFIG_MLX5_EN_MACSEC */
+#endif /* CONFIG_MLX5_MACSEC */
#endif /* __MLX5_ACCEL_EN_MACSEC_H__ */
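The metadata macros removed from this header encode the Rx fs_id in CQE metadata: bits 31:30 carry a 0x1 MACsec marker and bits 15:0 the fs_id handle (the macros now belong to lib/macsec_fs.h). An equivalent decode written out as helpers, with illustrative names:

static inline bool example_metadata_is_macsec(u32 metadata)
{
	/* MLX5_MACSEC_METADATA_MARKER(): marker in bits 31:30 must be 0x1 */
	return ((metadata >> 30) & 0x3) == 0x1;
}

static inline u16 example_metadata_fs_id(u32 metadata)
{
	/* MLX5_MACSEC_RX_METADAT_HANDLE(): fs_id lives in bits 15:0 */
	return metadata & 0xffff;
}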
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
deleted file mode 100644
index 414e28584881..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+++ /dev/null
@@ -1,1394 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#include <net/macsec.h>
-#include <linux/netdevice.h>
-#include <linux/mlx5/qp.h>
-#include <linux/if_vlan.h>
-#include "fs_core.h"
-#include "en/fs.h"
-#include "en_accel/macsec_fs.h"
-#include "mlx5_core.h"
-
-/* MACsec TX flow steering */
-#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
-#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
-
-#define TX_CRYPTO_TABLE_LEVEL 0
-#define TX_CRYPTO_TABLE_NUM_GROUPS 3
-#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
-#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
- (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
- CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
-#define TX_CHECK_TABLE_LEVEL 1
-#define TX_CHECK_TABLE_NUM_FTE 2
-#define RX_CRYPTO_TABLE_LEVEL 0
-#define RX_CHECK_TABLE_LEVEL 1
-#define RX_CHECK_TABLE_NUM_FTE 3
-#define RX_CRYPTO_TABLE_NUM_GROUPS 3
-#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
- ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
-#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
- (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
-#define RX_NUM_OF_RULES_PER_SA 2
-
-#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
-#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
-#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
-#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
-#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
-#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
-#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
-
-/* MACsec RX flow steering */
-#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
-
-struct mlx5_sectag_header {
- __be16 ethertype;
- u8 tci_an;
- u8 sl;
- u32 pn;
- u8 sci[MACSEC_SCI_LEN]; /* optional */
-} __packed;
-
-struct mlx5e_macsec_tx_rule {
- struct mlx5_flow_handle *rule;
- struct mlx5_pkt_reformat *pkt_reformat;
- u32 fs_id;
-};
-
-struct mlx5e_macsec_tables {
- struct mlx5e_flow_table ft_crypto;
- struct mlx5_flow_handle *crypto_miss_rule;
-
- struct mlx5_flow_table *ft_check;
- struct mlx5_flow_group *ft_check_group;
- struct mlx5_fc *check_miss_rule_counter;
- struct mlx5_flow_handle *check_miss_rule;
- struct mlx5_fc *check_rule_counter;
-
- u32 refcnt;
-};
-
-struct mlx5e_macsec_tx {
- struct mlx5_flow_handle *crypto_mke_rule;
- struct mlx5_flow_handle *check_rule;
-
- struct ida tx_halloc;
-
- struct mlx5e_macsec_tables tables;
-};
-
-struct mlx5e_macsec_rx_rule {
- struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
- struct mlx5_modify_hdr *meta_modhdr;
-};
-
-struct mlx5e_macsec_rx {
- struct mlx5_flow_handle *check_rule[2];
- struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
-
- struct mlx5e_macsec_tables tables;
-};
-
-union mlx5e_macsec_rule {
- struct mlx5e_macsec_tx_rule tx_rule;
- struct mlx5e_macsec_rx_rule rx_rule;
-};
-
-struct mlx5e_macsec_fs {
- struct mlx5_core_dev *mdev;
- struct net_device *netdev;
- struct mlx5e_macsec_tx *tx_fs;
- struct mlx5e_macsec_rx *rx_fs;
-};
-
-static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct mlx5e_macsec_tables *tx_tables;
-
- tx_tables = &tx_fs->tables;
-
- /* Tx check table */
- if (tx_fs->check_rule) {
- mlx5_del_flow_rules(tx_fs->check_rule);
- tx_fs->check_rule = NULL;
- }
-
- if (tx_tables->check_miss_rule) {
- mlx5_del_flow_rules(tx_tables->check_miss_rule);
- tx_tables->check_miss_rule = NULL;
- }
-
- if (tx_tables->ft_check_group) {
- mlx5_destroy_flow_group(tx_tables->ft_check_group);
- tx_tables->ft_check_group = NULL;
- }
-
- if (tx_tables->ft_check) {
- mlx5_destroy_flow_table(tx_tables->ft_check);
- tx_tables->ft_check = NULL;
- }
-
- /* Tx crypto table */
- if (tx_fs->crypto_mke_rule) {
- mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
- tx_fs->crypto_mke_rule = NULL;
- }
-
- if (tx_tables->crypto_miss_rule) {
- mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
- tx_tables->crypto_miss_rule = NULL;
- }
-
- mlx5e_destroy_flow_table(&tx_tables->ft_crypto);
-}
-
-static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
- int ix = 0;
- u32 *in;
- int err;
- u8 *mc;
-
- ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g)
- return -ENOMEM;
- in = kvzalloc(inlen, GFP_KERNEL);
-
- if (!in) {
- kfree(ft->g);
- ft->g = NULL;
- return -ENOMEM;
- }
-
- mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
- /* Flow Group for MKE match */
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow Group for SA rules */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_MACSEC_MASK);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow Group for l2 traps */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- kvfree(in);
- return 0;
-
-err:
- err = PTR_ERR(ft->g[ft->num_groups]);
- ft->g[ft->num_groups] = NULL;
- kvfree(in);
-
- return err;
-}
-
-static struct mlx5_flow_table
- *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
- int level, int max_fte)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_table *fdb = NULL;
-
- /* reserve entry for the match all miss group and rule */
- ft_attr.autogroup.num_reserved_entries = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft_attr.prio = 0;
- ft_attr.flags = flags;
- ft_attr.level = level;
- ft_attr.max_fte = max_fte;
-
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-
- return fdb;
-}
-
-static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *tx_tables;
- struct mlx5_flow_act flow_act = {};
- struct mlx5e_flow_table *ft_crypto;
- struct mlx5_flow_table *flow_table;
- struct mlx5_flow_group *flow_group;
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- u32 *flow_group_in;
- int err;
-
- ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
- if (!ns)
- return -ENOMEM;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in) {
- err = -ENOMEM;
- goto out_spec;
- }
-
- tx_tables = &tx_fs->tables;
- ft_crypto = &tx_tables->ft_crypto;
-
- /* Tx crypto table */
- ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
- ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
-
- flow_table = mlx5_create_flow_table(ns, &ft_attr);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
- goto out_flow_group;
- }
- ft_crypto->t = flow_table;
-
- /* Tx crypto table groups */
- err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
- if (err) {
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
- err);
- goto err;
- }
-
- /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-
- rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
- goto err;
- }
- tx_fs->crypto_mke_rule = rule;
-
- /* Tx crypto table Default miss rule */
- memset(&flow_act, 0, sizeof(flow_act));
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
- goto err;
- }
- tx_tables->crypto_miss_rule = rule;
-
- /* Tx check table */
- flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
- TX_CHECK_TABLE_NUM_FTE);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err);
- goto err;
- }
- tx_tables->ft_check = flow_table;
-
- /* Tx check table Default miss group/rule */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
- flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
- if (IS_ERR(flow_group)) {
- err = PTR_ERR(flow_group);
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
- err);
- goto err;
- }
- tx_tables->ft_check_group = flow_group;
-
- /* Tx check table default drop rule */
- memset(&dest, 0, sizeof(struct mlx5_flow_destination));
- memset(&flow_act, 0, sizeof(flow_act));
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
- goto err;
- }
- tx_tables->check_miss_rule = rule;
-
- /* Tx check table rule */
- memset(spec, 0, sizeof(struct mlx5_flow_spec));
- memset(&dest, 0, sizeof(struct mlx5_flow_destination));
- memset(&flow_act, 0, sizeof(flow_act));
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
-
- flow_act.flags = FLOW_ACT_NO_APPEND;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
- rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err);
- goto err;
- }
- tx_fs->check_rule = rule;
-
- goto out_flow_group;
-
-err:
- macsec_fs_tx_destroy(macsec_fs);
-out_flow_group:
- kvfree(flow_group_in);
-out_spec:
- kvfree(spec);
- return err;
-}
-
-static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct mlx5e_macsec_tables *tx_tables;
- int err = 0;
-
- tx_tables = &tx_fs->tables;
- if (tx_tables->refcnt)
- goto out;
-
- err = macsec_fs_tx_create(macsec_fs);
- if (err)
- return err;
-
-out:
- tx_tables->refcnt++;
- return err;
-}
-
-static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
-
- if (--tx_tables->refcnt)
- return;
-
- macsec_fs_tx_destroy(macsec_fs);
-}
-
-static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- u32 macsec_obj_id,
- u32 *fs_id)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- int err = 0;
- u32 id;
-
- err = ida_alloc_range(&tx_fs->tx_halloc, 1,
- MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
- GFP_KERNEL);
- if (err < 0)
- return err;
-
- id = err;
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
-
- /* Metadata match */
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_MACSEC_MASK);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_MACSEC | id << 2);
-
- *fs_id = id;
- flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
- flow_act->crypto.obj_id = macsec_obj_id;
-
- mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
- return 0;
-}
-
-static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
- char *reformatbf,
- size_t *reformat_size)
-{
- const struct macsec_secy *secy = ctx->secy;
- bool sci_present = macsec_send_sci(secy);
- struct mlx5_sectag_header sectag = {};
- const struct macsec_tx_sc *tx_sc;
-
- tx_sc = &secy->tx_sc;
- sectag.ethertype = htons(ETH_P_MACSEC);
-
- if (sci_present) {
- sectag.tci_an |= MACSEC_TCI_SC;
- memcpy(&sectag.sci, &secy->sci,
- sizeof(sectag.sci));
- } else {
- if (tx_sc->end_station)
- sectag.tci_an |= MACSEC_TCI_ES;
- if (tx_sc->scb)
- sectag.tci_an |= MACSEC_TCI_SCB;
- }
-
- /* With GCM, C/E clear for !encrypt, both set for encrypt */
- if (tx_sc->encrypt)
- sectag.tci_an |= MACSEC_TCI_CONFID;
- else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
- sectag.tci_an |= MACSEC_TCI_C;
-
- sectag.tci_an |= tx_sc->encoding_sa;
-
- *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
-
- memcpy(reformatbf, &sectag, *reformat_size);
-}
-
-static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5e_macsec_tx_rule *tx_rule)
-{
- if (tx_rule->rule) {
- mlx5_del_flow_rules(tx_rule->rule);
- tx_rule->rule = NULL;
- }
-
- if (tx_rule->pkt_reformat) {
- mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
- tx_rule->pkt_reformat = NULL;
- }
-
- if (tx_rule->fs_id) {
- ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
- tx_rule->fs_id = 0;
- }
-
- kfree(tx_rule);
-
- macsec_fs_tx_ft_put(macsec_fs);
-}
-
-#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1
-
-static union mlx5e_macsec_rule *
-macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *macsec_ctx,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 *sa_fs_id)
-{
- char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
- struct mlx5_pkt_reformat_params reformat_params = {};
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- union mlx5e_macsec_rule *macsec_rule = NULL;
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *tx_tables;
- struct mlx5e_macsec_tx_rule *tx_rule;
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- size_t reformat_size;
- int err = 0;
- u32 fs_id;
-
- tx_tables = &tx_fs->tables;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return NULL;
-
- err = macsec_fs_tx_ft_get(macsec_fs);
- if (err)
- goto out_spec;
-
- macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
- if (!macsec_rule) {
- macsec_fs_tx_ft_put(macsec_fs);
- goto out_spec;
- }
-
- tx_rule = &macsec_rule->tx_rule;
-
- /* Tx crypto table crypto rule */
- macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
-
- reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
- reformat_params.size = reformat_size;
- reformat_params.data = reformatbf;
-
- if (is_vlan_dev(macsec_ctx->netdev))
- reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;
-
- flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
- if (IS_ERR(flow_act.pkt_reformat)) {
- err = PTR_ERR(flow_act.pkt_reformat);
- netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
- goto err;
- }
- tx_rule->pkt_reformat = flow_act.pkt_reformat;
-
- err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id);
- if (err) {
- netdev_err(netdev,
- "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
- err);
- goto err;
- }
-
- tx_rule->fs_id = fs_id;
- *sa_fs_id = fs_id;
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = tx_tables->ft_check;
- rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
- goto err;
- }
- tx_rule->rule = rule;
-
- goto out_spec;
-
-err:
- macsec_fs_tx_del_rule(macsec_fs, tx_rule);
- macsec_rule = NULL;
-out_spec:
- kvfree(spec);
-
- return macsec_rule;
-}
-
-static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *tx_tables;
-
- if (!tx_fs)
- return;
-
- tx_tables = &tx_fs->tables;
- if (tx_tables->refcnt) {
- netdev_err(macsec_fs->netdev,
- "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
- tx_tables->refcnt);
- return;
- }
-
- ida_destroy(&tx_fs->tx_halloc);
-
- if (tx_tables->check_miss_rule_counter) {
- mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
- tx_tables->check_miss_rule_counter = NULL;
- }
-
- if (tx_tables->check_rule_counter) {
- mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
- tx_tables->check_rule_counter = NULL;
- }
-
- kfree(tx_fs);
- macsec_fs->tx_fs = NULL;
-}
-
-static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *tx_tables;
- struct mlx5e_macsec_tx *tx_fs;
- struct mlx5_fc *flow_counter;
- int err;
-
- tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
- if (!tx_fs)
- return -ENOMEM;
-
- tx_tables = &tx_fs->tables;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
- err);
- goto err_encrypt_counter;
- }
- tx_tables->check_rule_counter = flow_counter;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Tx drop flow counter, err(%d)\n",
- err);
- goto err_drop_counter;
- }
- tx_tables->check_miss_rule_counter = flow_counter;
-
- ida_init(&tx_fs->tx_halloc);
-
- macsec_fs->tx_fs = tx_fs;
-
- return 0;
-
-err_drop_counter:
- mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
- tx_tables->check_rule_counter = NULL;
-
-err_encrypt_counter:
- kfree(tx_fs);
- macsec_fs->tx_fs = NULL;
-
- return err;
-}
-
-static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct mlx5e_macsec_tables *rx_tables;
- int i;
-
- /* Rx check table */
- for (i = 1; i >= 0; --i) {
- if (rx_fs->check_rule[i]) {
- mlx5_del_flow_rules(rx_fs->check_rule[i]);
- rx_fs->check_rule[i] = NULL;
- }
-
- if (rx_fs->check_rule_pkt_reformat[i]) {
- mlx5_packet_reformat_dealloc(macsec_fs->mdev,
- rx_fs->check_rule_pkt_reformat[i]);
- rx_fs->check_rule_pkt_reformat[i] = NULL;
- }
- }
-
- rx_tables = &rx_fs->tables;
-
- if (rx_tables->check_miss_rule) {
- mlx5_del_flow_rules(rx_tables->check_miss_rule);
- rx_tables->check_miss_rule = NULL;
- }
-
- if (rx_tables->ft_check_group) {
- mlx5_destroy_flow_group(rx_tables->ft_check_group);
- rx_tables->ft_check_group = NULL;
- }
-
- if (rx_tables->ft_check) {
- mlx5_destroy_flow_table(rx_tables->ft_check);
- rx_tables->ft_check = NULL;
- }
-
- /* Rx crypto table */
- if (rx_tables->crypto_miss_rule) {
- mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
- rx_tables->crypto_miss_rule = NULL;
- }
-
- mlx5e_destroy_flow_table(&rx_tables->ft_crypto);
-}
-
-static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
- int ix = 0;
- u32 *in;
- int err;
- u8 *mc;
-
- ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g)
- return -ENOMEM;
-
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- kfree(ft->g);
- return -ENOMEM;
- }
-
- mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
- /* Flow group for SA rule with SCI */
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS_5);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
- MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
- MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow group for SA rule without SCI */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS_5);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow Group for l2 traps */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- kvfree(in);
- return 0;
-
-err:
- err = PTR_ERR(ft->g[ft->num_groups]);
- ft->g[ft->num_groups] = NULL;
- kvfree(in);
-
- return err;
-}
-
-static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5_flow_destination *dest,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_spec *spec,
- int reformat_param_size)
-{
- int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
- u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
- struct mlx5_pkt_reformat_params reformat_params = {};
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5_flow_handle *rule;
- int err = 0;
-
- rx_tables = &rx_fs->tables;
-
-	/* Rx check table decap rule (16B or 8B SecTAG) */
- memset(dest, 0, sizeof(*dest));
- memset(flow_act, 0, sizeof(*flow_act));
- memset(spec, 0, sizeof(*spec));
-
- reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
- reformat_params.size = reformat_param_size;
- reformat_params.data = mlx5_reformat_buf;
- flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
- if (IS_ERR(flow_act->pkt_reformat)) {
- err = PTR_ERR(flow_act->pkt_reformat);
- netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
- return err;
- }
- rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
-
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- /* MACsec syndrome match */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
- /* ASO return reg syndrome match */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
-
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
-	/* Sectag TCI SC present bit */
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- flow_act->flags = FLOW_ACT_NO_APPEND;
- flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
- rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
- return err;
- }
-
- rx_fs->check_rule[rule_index] = rule;
-
- return 0;
-}
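
Two variants of this decap rule exist side by side: the helper is invoked once per SecTAG layout, and reformat_param_size picks both how many bytes the DEL_MACSEC reformat strips and the slot under which the rule and reformat context are stored. A minimal sketch of that slot selection (the helper name is illustrative only):

    /* Sketch: slot 0 serves SecTAGs carrying an explicit SCI (16-byte
     * decap), slot 1 serves SecTAGs without one (8-byte decap).
     */
    static int check_rule_slot(int reformat_param_size)
    {
    	return reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI ? 0 : 1;
    }

Both rules additionally require a zero MACsec syndrome and a zero ASO return register, so only frames the hardware decrypted successfully are decapsulated and forwarded up the stack.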
-
-static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5e_flow_table *ft_crypto;
- struct mlx5_flow_table *flow_table;
- struct mlx5_flow_group *flow_group;
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- u32 *flow_group_in;
- int err;
-
- ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
- if (!ns)
- return -ENOMEM;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in) {
- err = -ENOMEM;
- goto free_spec;
- }
-
- rx_tables = &rx_fs->tables;
- ft_crypto = &rx_tables->ft_crypto;
-
- /* Rx crypto table */
- ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
- ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
-
- flow_table = mlx5_create_flow_table(ns, &ft_attr);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
- goto out_flow_group;
- }
- ft_crypto->t = flow_table;
-
- /* Rx crypto table groups */
- err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
- if (err) {
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
- err);
- goto err;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
- rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev,
- "Failed to add MACsec Rx crypto table default miss rule %d\n",
- err);
- goto err;
- }
- rx_tables->crypto_miss_rule = rule;
-
- /* Rx check table */
- flow_table = macsec_fs_auto_group_table_create(ns,
- MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
- RX_CHECK_TABLE_LEVEL,
- RX_CHECK_TABLE_NUM_FTE);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err);
- goto err;
- }
- rx_tables->ft_check = flow_table;
-
- /* Rx check table Default miss group/rule */
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
- flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
- if (IS_ERR(flow_group)) {
- err = PTR_ERR(flow_group);
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Rx check table err(%d)\n",
- err);
- goto err;
- }
- rx_tables->ft_check_group = flow_group;
-
- /* Rx check table default drop rule */
- memset(&flow_act, 0, sizeof(flow_act));
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
- goto err;
- }
- rx_tables->check_miss_rule = rule;
-
- /* Rx check table decap rules */
- err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
- MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
- if (err)
- goto err;
-
- err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
- MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
- if (err)
- goto err;
-
- goto out_flow_group;
-
-err:
- macsec_fs_rx_destroy(macsec_fs);
-out_flow_group:
- kvfree(flow_group_in);
-free_spec:
- kvfree(spec);
- return err;
-}
-
-static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
- int err = 0;
-
- if (rx_tables->refcnt)
- goto out;
-
- err = macsec_fs_rx_create(macsec_fs);
- if (err)
- return err;
-
-out:
- rx_tables->refcnt++;
- return err;
-}
-
-static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
-
- if (--rx_tables->refcnt)
- return;
-
- macsec_fs_rx_destroy(macsec_fs);
-}
-
-static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5e_macsec_rx_rule *rx_rule)
-{
- int i;
-
- for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
- if (rx_rule->rule[i]) {
- mlx5_del_flow_rules(rx_rule->rule[i]);
- rx_rule->rule[i] = NULL;
- }
- }
-
- if (rx_rule->meta_modhdr) {
- mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
- rx_rule->meta_modhdr = NULL;
- }
-
- kfree(rx_rule);
-
- macsec_fs_rx_ft_put(macsec_fs);
-}
-
-static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- struct mlx5_macsec_rule_attrs *attrs,
- bool sci_present)
-{
- u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
- struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
- __be32 *sci_p = (__be32 *)(&attrs->sci);
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
- /* MACsec ethertype */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
-
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
-
-	/* Sectag AN + TCI SC present bit */
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
- tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- if (sci_present) {
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_5.macsec_tag_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
- be32_to_cpu(sci_p[0]));
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_5.macsec_tag_3);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
- be32_to_cpu(sci_p[1]));
- } else {
-		/* When SCI isn't present in the Sectag, match the source MAC
-		 * address, but only if the SCI contains the default MACsec PORT
-		 */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
- sci_p, ETH_ALEN);
- }
-
- crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
- crypto_params->obj_id = attrs->macsec_obj_id;
-}
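
The SCI occupies eight bytes on the wire, but the misc_parameters_5 match fields are 32 bits wide, so the rule above splits it across macsec_tag_2 and macsec_tag_3. When the SecTAG omits the SCI, the peer's SCI is implicitly source-MAC||port, which is why the fallback match is on the source MAC. A hedged sketch of the split, assuming sci_t is a big-endian u64 as in net/macsec.h:

    /* Sketch: splitting a 64-bit on-wire SCI into the two 32-bit words
     * programmed into macsec_tag_2/macsec_tag_3.
     */
    static void sci_to_tag_words(const sci_t *sci, u32 *tag2, u32 *tag3)
    {
    	const __be32 *w = (const __be32 *)sci;

    	*tag2 = be32_to_cpu(w[0]);	/* MAC address bytes 0-3 */
    	*tag3 = be32_to_cpu(w[1]);	/* MAC bytes 4-5 + port number */
    }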
-
-static union mlx5e_macsec_rule *
-macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 fs_id)
-{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- union mlx5e_macsec_rule *macsec_rule = NULL;
- struct mlx5_modify_hdr *modify_hdr = NULL;
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5e_macsec_rx_rule *rx_rule;
- struct mlx5_flow_act flow_act = {};
- struct mlx5e_flow_table *ft_crypto;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return NULL;
-
- err = macsec_fs_rx_ft_get(macsec_fs);
- if (err)
- goto out_spec;
-
- macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
- if (!macsec_rule) {
- macsec_fs_rx_ft_put(macsec_fs);
- goto out_spec;
- }
-
- rx_rule = &macsec_rule->rx_rule;
- rx_tables = &rx_fs->tables;
- ft_crypto = &rx_tables->ft_crypto;
-
-	/* Set bits [31:30] to the MACsec marker (0x1) and bits [15:0] to the fs_id */
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
-
- modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
- 1, action);
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
- netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err);
- modify_hdr = NULL;
- goto err;
- }
- rx_rule->meta_modhdr = modify_hdr;
-
- /* Rx crypto table with SCI rule */
- macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
-
- flow_act.modify_hdr = modify_hdr;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx_tables->ft_check;
- rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev,
- "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
- err);
- goto err;
- }
- rx_rule->rule[0] = rule;
-
- /* Rx crypto table without SCI rule */
- if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
- memset(spec, 0, sizeof(struct mlx5_flow_spec));
- memset(&dest, 0, sizeof(struct mlx5_flow_destination));
- memset(&flow_act, 0, sizeof(flow_act));
-
- macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
-
- flow_act.modify_hdr = modify_hdr;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx_tables->ft_check;
- rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev,
- "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
- err);
- goto err;
- }
- rx_rule->rule[1] = rule;
- }
-
- kvfree(spec);
- return macsec_rule;
-
-err:
- macsec_fs_rx_del_rule(macsec_fs, rx_rule);
- macsec_rule = NULL;
-out_spec:
- kvfree(spec);
- return macsec_rule;
-}
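
On the receive side the hardware does not deliver the fs_id out of band; the modify-header action above stamps metadata register B, and the driver's Rx datapath later reads that register back (via the CQE) to tell hardware-decrypted MACsec traffic apart and map it to its SA. A sketch of the decode side, with hypothetical helper names (the real datapath lives in en_accel/macsec.c):

    /* Sketch (hypothetical helpers): decoding the reg B layout set above -
     * bit 30 is the MACsec marker, bits [15:0] carry the fs_id.
     */
    static bool rx_metadata_is_macsec(u32 reg_b)
    {
    	return reg_b & BIT(30);
    }

    static u32 rx_metadata_to_fs_id(u32 reg_b)
    {
    	return reg_b & 0xFFFF;
    }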
-
-static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5e_macsec_rx *rx_fs;
- struct mlx5_fc *flow_counter;
- int err;
-
- rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
- if (!rx_fs)
- return -ENOMEM;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
- err);
- goto err_encrypt_counter;
- }
-
- rx_tables = &rx_fs->tables;
- rx_tables->check_rule_counter = flow_counter;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Rx drop flow counter, err(%d)\n",
- err);
- goto err_drop_counter;
- }
- rx_tables->check_miss_rule_counter = flow_counter;
-
- macsec_fs->rx_fs = rx_fs;
-
- return 0;
-
-err_drop_counter:
- mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
- rx_tables->check_rule_counter = NULL;
-
-err_encrypt_counter:
- kfree(rx_fs);
- macsec_fs->rx_fs = NULL;
-
- return err;
-}
-
-static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *rx_tables;
-
- if (!rx_fs)
- return;
-
- rx_tables = &rx_fs->tables;
-
- if (rx_tables->refcnt) {
- netdev_err(macsec_fs->netdev,
- "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
- rx_tables->refcnt);
- return;
- }
-
- if (rx_tables->check_miss_rule_counter) {
- mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
- rx_tables->check_miss_rule_counter = NULL;
- }
-
- if (rx_tables->check_rule_counter) {
- mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
- rx_tables->check_rule_counter = NULL;
- }
-
- kfree(rx_fs);
- macsec_fs->rx_fs = NULL;
-}
-
-void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
-{
- struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
- struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
- struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
-
- if (tx_tables->check_rule_counter)
- mlx5_fc_query(mdev, tx_tables->check_rule_counter,
- &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
-
- if (tx_tables->check_miss_rule_counter)
- mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
- &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
-
- if (rx_tables->check_rule_counter)
- mlx5_fc_query(mdev, rx_tables->check_rule_counter,
- &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
-
- if (rx_tables->check_miss_rule_counter)
- mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
- &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
-}
-
-union mlx5e_macsec_rule *
-mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *macsec_ctx,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 *sa_fs_id)
-{
- return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
- macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
- macsec_fs_rx_add_rule(macsec_fs, attrs, *sa_fs_id);
-}
-
-void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- union mlx5e_macsec_rule *macsec_rule,
- int action)
-{
- (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
- macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
- macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
-}
-
-void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
- macsec_fs_rx_cleanup(macsec_fs);
- macsec_fs_tx_cleanup(macsec_fs);
- kfree(macsec_fs);
-}
-
-struct mlx5e_macsec_fs *
-mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev)
-{
- struct mlx5e_macsec_fs *macsec_fs;
- int err;
-
- macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
- if (!macsec_fs)
- return NULL;
-
- macsec_fs->mdev = mdev;
- macsec_fs->netdev = netdev;
-
- err = macsec_fs_tx_init(macsec_fs);
- if (err) {
- netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
- goto err;
- }
-
- err = macsec_fs_rx_init(macsec_fs);
- if (err) {
- netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
- goto tx_cleanup;
- }
-
- return macsec_fs;
-
-tx_cleanup:
- macsec_fs_tx_cleanup(macsec_fs);
-err:
- kfree(macsec_fs);
- return NULL;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
deleted file mode 100644
index b429648d4ee7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#ifndef __MLX5_MACSEC_STEERING_H__
-#define __MLX5_MACSEC_STEERING_H__
-
-#ifdef CONFIG_MLX5_EN_MACSEC
-
-#include "en_accel/macsec.h"
-
-#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
-
-struct mlx5e_macsec_fs;
-union mlx5e_macsec_rule;
-
-struct mlx5_macsec_rule_attrs {
- sci_t sci;
- u32 macsec_obj_id;
- u8 assoc_num;
- int action;
-};
-
-enum mlx5_macsec_action {
- MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
-};
-
-void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
-
-struct mlx5e_macsec_fs *
-mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
-
-union mlx5e_macsec_rule *
-mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *ctx,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 *sa_fs_id);
-
-void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- union mlx5e_macsec_rule *macsec_rule,
- int action);
-
-void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
-
-#endif
-
-#endif /* __MLX5_MACSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
index e50a2e3f3d18..4559ee16a11a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -8,14 +8,14 @@
#include "en_accel/macsec.h"
static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes_drop) },
};
#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
@@ -52,6 +52,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
{
+ struct mlx5_macsec_fs *macsec_fs;
int i;
if (!priv->macsec)
@@ -60,9 +61,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
if (!mlx5e_is_macsec_device(priv->mdev))
return idx;
- mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+ macsec_fs = priv->mdev->macsec_fs;
+ mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
mlx5e_macsec_hw_stats_desc,
i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 348fb140858c..4b96ad657145 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2502,7 +2502,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
&MLX5E_STATS_GRP(qos),
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
&MLX5E_STATS_GRP(macsec_hw),
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 244cfd470903..a4b925331661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -975,6 +975,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
table_type = FS_FT_ESW_INGRESS_ACL;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
case MLX5_FLOW_NAMESPACE_RDMA_TX:
max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
table_type = FS_FT_RDMA_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a3228502f866..a13b9c2bd144 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -107,7 +107,7 @@
LEFTOVERS_NUM_PRIOS)
#define KERNEL_RX_MACSEC_NUM_PRIOS 1
-#define KERNEL_RX_MACSEC_NUM_LEVELS 2
+#define KERNEL_RX_MACSEC_NUM_LEVELS 3
#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
@@ -224,6 +224,7 @@ static struct init_tree_node egress_root_fs = {
enum {
RDMA_RX_IPSEC_PRIO,
+ RDMA_RX_MACSEC_PRIO,
RDMA_RX_COUNTERS_PRIO,
RDMA_RX_BYPASS_PRIO,
RDMA_RX_KERNEL_PRIO,
@@ -237,9 +238,13 @@ enum {
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+#define RDMA_RX_MACSEC_NUM_PRIOS 1
+#define RDMA_RX_MACSEC_PRIO_NUM_LEVELS 2
+#define RDMA_RX_MACSEC_MIN_LEVEL (RDMA_RX_COUNTERS_MIN_LEVEL + RDMA_RX_MACSEC_NUM_PRIOS)
+
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 4,
+ .ar_size = 5,
.children = (struct init_tree_node[]) {
[RDMA_RX_IPSEC_PRIO] =
ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
@@ -247,6 +252,12 @@ static struct init_tree_node rdma_rx_root_fs = {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
RDMA_RX_IPSEC_NUM_LEVELS))),
+ [RDMA_RX_MACSEC_PRIO] =
+ ADD_PRIO(0, RDMA_RX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_RX_MACSEC_NUM_PRIOS,
+ RDMA_RX_MACSEC_PRIO_NUM_LEVELS))),
[RDMA_RX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
@@ -270,6 +281,7 @@ static struct init_tree_node rdma_rx_root_fs = {
enum {
RDMA_TX_COUNTERS_PRIO,
RDMA_TX_IPSEC_PRIO,
+ RDMA_TX_MACSEC_PRIO,
RDMA_TX_BYPASS_PRIO,
};
@@ -280,9 +292,13 @@ enum {
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
+#define RDMA_TX_MACSEC_NUM_PRIOS 1
+#define RDMA_TX_MACSEC_PRIO_NUM_LEVELS 1
+#define RDMA_TX_MACSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_MACSEC_NUM_PRIOS)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 3,
+ .ar_size = 4,
.children = (struct init_tree_node[]) {
[RDMA_TX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
@@ -296,7 +312,12 @@ static struct init_tree_node rdma_tx_root_fs = {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),
-
+ [RDMA_TX_MACSEC_PRIO] =
+ ADD_PRIO(0, RDMA_TX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_TX_MACSEC_NUM_PRIOS,
+						  RDMA_TX_MACSEC_PRIO_NUM_LEVELS))),
[RDMA_TX_BYPASS_PRIO] =
ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
@@ -2466,6 +2487,14 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
root_ns = steering->rdma_tx_root_ns;
prio = RDMA_TX_IPSEC_PRIO;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_MACSEC_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_MACSEC_PRIO;
+ break;
default: /* Must be NIC RX */
WARN_ON(!is_nic_rx_ns(type));
root_ns = steering->root_ns;
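
With the new priorities wired up, the MACsec code obtains these namespaces through the regular flow-steering API; the sketch below mirrors how macsec_fs_tx_roce_create() in this patch consumes the Tx side:

    struct mlx5_flow_namespace *ns;

    ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC);
    if (!ns)
    	return -ENOMEM;	/* namespace absent: the steering tree lacks the prio */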
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
new file mode 100644
index 000000000000..2f2cb67717cd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -0,0 +1,2410 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/macsec.h>
+#include <linux/mlx5/qp.h>
+#include <linux/if_vlan.h>
+#include <linux/mlx5/fs_helpers.h>
+#include <linux/mlx5/macsec.h>
+#include "fs_core.h"
+#include "lib/macsec_fs.h"
+#include "mlx5_core.h"
+
+/* MACsec TX flow steering */
+#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
+#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
+
+#define TX_CRYPTO_TABLE_LEVEL 0
+#define TX_CRYPTO_TABLE_NUM_GROUPS 3
+#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
+#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
+ CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
+#define TX_CHECK_TABLE_LEVEL 1
+#define TX_CHECK_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_LEVEL 0
+#define RX_CHECK_TABLE_LEVEL 1
+#define RX_ROCE_TABLE_LEVEL 2
+#define RX_CHECK_TABLE_NUM_FTE 3
+#define RX_ROCE_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_NUM_GROUPS 3
+#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
+ ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
+#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
+#define RX_NUM_OF_RULES_PER_SA 2
+
+#define RDMA_RX_ROCE_IP_TABLE_LEVEL 0
+#define RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL 1
+
+#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
+#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
+#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
+
+/* MACsec Tx WQE metadata mask for flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
+
+/* MACsec fs_id handling for steering */
+#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
+#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
+
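On Tx the fs_id rides in the send WQE's flow-table metadata, which steering later matches as misc_parameters_2.metadata_reg_a: the MACsec marker flag sits in the low bit of the 0x3E mask and the fs_id occupies the bits above it; on Rx, BIT(30) marks hardware-decrypted traffic. A worked example, assuming MLX5_ETH_WQE_FT_META_MACSEC is BIT(1) as defined in include/linux/mlx5/qp.h:

    /* Worked example: fs_id = 3
     *   macsec_fs_set_tx_fs_id(3) = BIT(1) | (3 << 2) = 0x0E
     * which lies entirely inside the 0x3E (bits [5:1]) match mask used
     * for metadata_reg_a in the Tx rules below.
     */
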
+struct mlx5_sectag_header {
+ __be16 ethertype;
+ u8 tci_an;
+ u8 sl;
+ u32 pn;
+ u8 sci[MACSEC_SCI_LEN]; /* optional */
+} __packed;
+
+struct mlx5_roce_macsec_tx_rule {
+ u32 fs_id;
+ u16 gid_idx;
+ struct list_head entry;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5_macsec_tx_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u32 fs_id;
+};
+
+struct mlx5_macsec_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+};
+
+struct mlx5_macsec_tables {
+ struct mlx5_macsec_flow_table ft_crypto;
+ struct mlx5_flow_handle *crypto_miss_rule;
+
+ struct mlx5_flow_table *ft_check;
+ struct mlx5_flow_group *ft_check_group;
+ struct mlx5_fc *check_miss_rule_counter;
+ struct mlx5_flow_handle *check_miss_rule;
+ struct mlx5_fc *check_rule_counter;
+
+ u32 refcnt;
+};
+
+struct mlx5_fs_id {
+ u32 id;
+ refcount_t refcnt;
+ sci_t sci;
+ struct rhash_head hash;
+};
+
+struct mlx5_macsec_device {
+ struct list_head macsec_devices_list_entry;
+ void *macdev;
+ struct xarray tx_id_xa;
+ struct xarray rx_id_xa;
+};
+
+struct mlx5_macsec_tx {
+ struct mlx5_flow_handle *crypto_mke_rule;
+ struct mlx5_flow_handle *check_rule;
+
+ struct ida tx_halloc;
+
+ struct mlx5_macsec_tables tables;
+
+ struct mlx5_flow_table *ft_rdma_tx;
+};
+
+struct mlx5_roce_macsec_rx_rule {
+ u32 fs_id;
+ u16 gid_idx;
+ struct mlx5_flow_handle *op;
+ struct mlx5_flow_handle *ip;
+ struct list_head entry;
+};
+
+struct mlx5_macsec_rx_rule {
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5_macsec_miss {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5_macsec_rx_roce {
+ /* Flow table/rules in NIC domain, to check if it's a RoCE packet */
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_modify_hdr *copy_modify_hdr;
+ struct mlx5_macsec_miss nic_miss;
+
+ /* Flow table/rule in RDMA domain, to check dgid */
+ struct mlx5_flow_table *ft_ip_check;
+ struct mlx5_flow_table *ft_macsec_op_check;
+ struct mlx5_macsec_miss miss;
+};
+
+struct mlx5_macsec_rx {
+ struct mlx5_flow_handle *check_rule[2];
+ struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
+
+ struct mlx5_macsec_tables tables;
+ struct mlx5_macsec_rx_roce roce;
+};
+
+union mlx5_macsec_rule {
+ struct mlx5_macsec_tx_rule tx_rule;
+ struct mlx5_macsec_rx_rule rx_rule;
+};
+
+static const struct rhashtable_params rhash_sci = {
+ .key_len = sizeof_field(struct mlx5_fs_id, sci),
+ .key_offset = offsetof(struct mlx5_fs_id, sci),
+ .head_offset = offsetof(struct mlx5_fs_id, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+static const struct rhashtable_params rhash_fs_id = {
+ .key_len = sizeof_field(struct mlx5_fs_id, id),
+ .key_offset = offsetof(struct mlx5_fs_id, id),
+ .head_offset = offsetof(struct mlx5_fs_id, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
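
The two tables index the same refcounted mlx5_fs_id objects by different keys: Tx consumers translate a SecTAG SCI into the fs_id to stamp on egress WQEs, while Rx consumers resolve a steering fs_id back to its bookkeeping entry. Datapath readers run under RCU, along the lines of this sketch:

    /* Sketch: RCU-side translation of an SCI to its Tx fs_id. */
    static u32 sci_to_tx_fs_id(struct mlx5_macsec_fs *macsec_fs, sci_t sci)
    {
    	struct mlx5_fs_id *id;
    	u32 fs_id = 0;

    	rcu_read_lock();
    	id = rhashtable_lookup(&macsec_fs->sci_hash, &sci, rhash_sci);
    	if (id)
    		fs_id = id->id;
    	rcu_read_unlock();

    	return fs_id;
    }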
+
+struct mlx5_macsec_fs {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_macsec_tx *tx_fs;
+ struct mlx5_macsec_rx *rx_fs;
+
+	/* Stats management */
+ struct mlx5_macsec_stats stats;
+
+ /* Tx sci -> fs id mapping handling */
+ struct rhashtable sci_hash; /* sci -> mlx5_fs_id */
+
+ /* RX fs_id -> mlx5_fs_id mapping handling */
+ struct rhashtable fs_id_hash; /* fs_id -> mlx5_fs_id */
+
+ /* TX & RX fs_id lists per macsec device */
+ struct list_head macsec_devices_list;
+};
+
+static void macsec_fs_destroy_groups(struct mlx5_macsec_flow_table *ft)
+{
+ int i;
+
+ for (i = ft->num_groups - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(ft->g[i]))
+ mlx5_destroy_flow_group(ft->g[i]);
+ ft->g[i] = NULL;
+ }
+ ft->num_groups = 0;
+}
+
+static void macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table *ft)
+{
+ macsec_fs_destroy_groups(ft);
+ kfree(ft->g);
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+}
+
+static void macsec_fs_tx_destroy(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_macsec_tables *tx_tables;
+
+ if (mlx5_is_macsec_roce_supported(macsec_fs->mdev))
+ mlx5_destroy_flow_table(tx_fs->ft_rdma_tx);
+
+ tx_tables = &tx_fs->tables;
+
+ /* Tx check table */
+ if (tx_fs->check_rule) {
+ mlx5_del_flow_rules(tx_fs->check_rule);
+ tx_fs->check_rule = NULL;
+ }
+
+ if (tx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->check_miss_rule);
+ tx_tables->check_miss_rule = NULL;
+ }
+
+ if (tx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(tx_tables->ft_check_group);
+ tx_tables->ft_check_group = NULL;
+ }
+
+ if (tx_tables->ft_check) {
+ mlx5_destroy_flow_table(tx_tables->ft_check);
+ tx_tables->ft_check = NULL;
+ }
+
+ /* Tx crypto table */
+ if (tx_fs->crypto_mke_rule) {
+ mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
+ tx_fs->crypto_mke_rule = NULL;
+ }
+
+ if (tx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
+ tx_tables->crypto_miss_rule = NULL;
+ }
+
+ macsec_fs_destroy_flow_table(&tx_tables->ft_crypto);
+}
+
+static int macsec_fs_tx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow Group for MKE match */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for SA rules */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static struct mlx5_flow_table *
+macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
+				  int level, int max_fte)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb = NULL;
+
+ /* reserve entry for the match all miss group and rule */
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = 0;
+ ft_attr.flags = flags;
+ ft_attr.level = level;
+ ft_attr.max_fte = max_fte;
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+
+ return fdb;
+}
+
+enum {
+ RDMA_TX_MACSEC_LEVEL = 0,
+};
+
+static int macsec_fs_tx_roce_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (!mlx5_is_macsec_roce_supported(mdev)) {
+ mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
+ return 0;
+ }
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ /* Tx RoCE crypto table */
+ ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_TX_MACSEC_LEVEL, CRYPTO_NUM_MAXSEC_FTE);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create MACsec RoCE Tx crypto table err(%d)\n", err);
+ return err;
+ }
+ tx_fs->ft_rdma_tx = ft;
+
+ return 0;
+}
+
+static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *tx_tables;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_macsec_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_spec;
+ }
+
+ tx_tables = &tx_fs->tables;
+ ft_crypto = &tx_tables->ft_crypto;
+
+ /* Tx crypto table */
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Tx crypto table groups */
+ err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->crypto_mke_rule = rule;
+
+ /* Tx crypto table Default miss rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
+ goto err;
+ }
+ tx_tables->crypto_miss_rule = rule;
+
+ /* Tx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
+ TX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Fail to create MACsec TX check table, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->ft_check = flow_table;
+
+ /* Tx check table Default miss group/rule */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+ tx_tables->ft_check_group = flow_group;
+
+ /* Tx check table default drop rule */
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->check_miss_rule = rule;
+
+ /* Tx check table rule */
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec check rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->check_rule = rule;
+
+ err = macsec_fs_tx_roce_create(macsec_fs);
+ if (err)
+ goto err;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err:
+ macsec_fs_tx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_tx_ft_get(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_macsec_tables *tx_tables;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_tx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ tx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_tx_ft_put(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+
+ if (--tx_tables->refcnt)
+ return;
+
+ macsec_fs_tx_destroy(macsec_fs);
+}
+
+static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ u32 macsec_obj_id,
+ u32 *fs_id)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ int err = 0;
+ u32 id;
+
+ err = ida_alloc_range(&tx_fs->tx_halloc, 1,
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ id = err;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Metadata match */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ macsec_fs_set_tx_fs_id(id));
+
+ *fs_id = id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ flow_act->crypto.obj_id = macsec_obj_id;
+
+ mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
+ return 0;
+}
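
fs_ids for Tx come from a small IDA; 0 is deliberately excluded so the cleanup path can treat a zero fs_id as "never allocated". A sketch of the allocate/free pairing used here:

    /* Sketch: ida_alloc_range() hands back the new id (>= 1) or a
     * negative errno such as -ENOSPC once the range is exhausted.
     */
    int id = ida_alloc_range(&tx_fs->tx_halloc, 1,
    			 MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
    			 GFP_KERNEL);
    if (id < 0)
    	return id;
    /* ... and on rule teardown: */
    ida_free(&tx_fs->tx_halloc, id);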
+
+static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
+ char *reformatbf,
+ size_t *reformat_size)
+{
+ const struct macsec_secy *secy = ctx->secy;
+ bool sci_present = macsec_send_sci(secy);
+ struct mlx5_sectag_header sectag = {};
+ const struct macsec_tx_sc *tx_sc;
+
+ tx_sc = &secy->tx_sc;
+ sectag.ethertype = htons(ETH_P_MACSEC);
+
+ if (sci_present) {
+ sectag.tci_an |= MACSEC_TCI_SC;
+ memcpy(&sectag.sci, &secy->sci,
+ sizeof(sectag.sci));
+ } else {
+ if (tx_sc->end_station)
+ sectag.tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ sectag.tci_an |= MACSEC_TCI_SCB;
+ }
+
+ /* With GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ sectag.tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ sectag.tci_an |= MACSEC_TCI_C;
+
+ sectag.tci_an |= tx_sc->encoding_sa;
+
+ *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+
+ memcpy(reformatbf, &sectag, *reformat_size);
+}
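
The TCI/AN octet assembled here follows IEEE 802.1AE: SC flags an explicit SCI, ES/SCB cover the implicit-SCI cases, E and C describe confidentiality, and the low two bits carry the encoding association number. A worked example, assuming the usual bit values from net/macsec.h (SC=0x20, E=0x08, C=0x04, CONFID=E|C):

    /* Worked example: encrypting SecY, SCI carried in the tag, AN = 1:
     *   tci_an = MACSEC_TCI_SC | MACSEC_TCI_CONFID | 1
     *          = 0x20 | 0x0C | 0x01 = 0x2D
     *   *reformat_size = 8 (SecTAG) + 8 (SCI) = 16 bytes
     */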
+
+static bool macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device *macsec_device)
+{
+ if (xa_empty(&macsec_device->tx_id_xa) &&
+ xa_empty(&macsec_device->rx_id_xa))
+ return true;
+
+ return false;
+}
+
+static void macsec_fs_id_del(struct list_head *macsec_devices_list, u32 fs_id,
+ void *macdev, struct rhashtable *hash_table, bool is_tx)
+{
+ const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_fs_id *fs_id_found;
+ struct xarray *fs_id_xa;
+
+ list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+ WARN_ON(!macsec_device);
+
+ fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
+ &macsec_device->rx_id_xa;
+ xa_lock(fs_id_xa);
+ fs_id_found = xa_load(fs_id_xa, fs_id);
+ WARN_ON(!fs_id_found);
+
+ if (!refcount_dec_and_test(&fs_id_found->refcnt)) {
+ xa_unlock(fs_id_xa);
+ return;
+ }
+
+ if (fs_id_found->id) {
+		/* Make sure ongoing datapath readers see a valid SA */
+ rhashtable_remove_fast(hash_table, &fs_id_found->hash, *rhash);
+ fs_id_found->id = 0;
+ }
+ xa_unlock(fs_id_xa);
+
+ xa_erase(fs_id_xa, fs_id);
+
+ kfree(fs_id_found);
+
+ if (macsec_fs_is_macsec_device_empty(macsec_device)) {
+ list_del(&macsec_device->macsec_devices_list_entry);
+ kfree(macsec_device);
+ }
+}
+
+static int macsec_fs_id_add(struct list_head *macsec_devices_list, u32 fs_id,
+ void *macdev, struct rhashtable *hash_table, sci_t sci,
+ bool is_tx)
+{
+ const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_fs_id *fs_id_iter;
+ struct xarray *fs_id_xa;
+ int err;
+
+ if (!is_tx) {
+ rcu_read_lock();
+ fs_id_iter = rhashtable_lookup(hash_table, &fs_id, rhash_fs_id);
+ if (fs_id_iter) {
+ refcount_inc(&fs_id_iter->refcnt);
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+ }
+
+ fs_id_iter = kzalloc(sizeof(*fs_id_iter), GFP_KERNEL);
+ if (!fs_id_iter)
+ return -ENOMEM;
+
+ list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (!macsec_device) { /* first time adding a SA to that device */
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device) {
+ err = -ENOMEM;
+ goto err_alloc_dev;
+ }
+ macsec_device->macdev = macdev;
+ xa_init(&macsec_device->tx_id_xa);
+ xa_init(&macsec_device->rx_id_xa);
+ list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
+ }
+
+ fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
+ &macsec_device->rx_id_xa;
+ fs_id_iter->id = fs_id;
+ refcount_set(&fs_id_iter->refcnt, 1);
+ fs_id_iter->sci = sci;
+ err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL));
+ if (err)
+ goto err_store_id;
+
+ err = rhashtable_insert_fast(hash_table, &fs_id_iter->hash, *rhash);
+ if (err)
+ goto err_hash_insert;
+
+ return 0;
+
+err_hash_insert:
+ xa_erase(fs_id_xa, fs_id);
+err_store_id:
+ if (macsec_fs_is_macsec_device_empty(macsec_device)) {
+ list_del(&macsec_device->macsec_devices_list_entry);
+ kfree(macsec_device);
+ }
+err_alloc_dev:
+ kfree(fs_id_iter);
+ return err;
+}
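
Together, macsec_fs_id_add()/macsec_fs_id_del() keep three structures consistent: the per-netdevice xarray that owns the refcounted mlx5_fs_id, the global rhashtable used by datapath lookups, and macsec_devices_list itself, whose entries are freed once a device holds no ids. On Rx, all SAs of one SC share a single fs_id, hence the early refcount_inc() path above. A hedged usage sketch for the Rx flavor:

    /* Sketch: id bookkeeping bracketing an Rx rule's lifetime. */
    err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macdev,
    		       &macsec_fs->fs_id_hash, attrs->sci, false);
    if (err)
    	return err;
    /* ... rule in use ... */
    macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev,
    		 &macsec_fs->fs_id_hash, false);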
+
+static void macsec_fs_tx_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_tx_rule *tx_rule,
+ void *macdev)
+{
+ macsec_fs_id_del(&macsec_fs->macsec_devices_list, tx_rule->fs_id, macdev,
+ &macsec_fs->sci_hash, true);
+
+ if (tx_rule->rule) {
+ mlx5_del_flow_rules(tx_rule->rule);
+ tx_rule->rule = NULL;
+ }
+
+ if (tx_rule->pkt_reformat) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
+ tx_rule->pkt_reformat = NULL;
+ }
+
+ if (tx_rule->fs_id) {
+ ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
+ tx_rule->fs_id = 0;
+ }
+
+ kfree(tx_rule);
+
+ macsec_fs_tx_ft_put(macsec_fs);
+}
+
+#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1
+
+static union mlx5_macsec_rule *
+macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id)
+{
+ char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ union mlx5_macsec_rule *macsec_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *tx_tables;
+ struct mlx5_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ size_t reformat_size;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_tx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_tx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ tx_rule = &macsec_rule->tx_rule;
+
+ /* Tx crypto table crypto rule */
+ macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
+ reformat_params.size = reformat_size;
+ reformat_params.data = reformatbf;
+
+ if (is_vlan_dev(macsec_ctx->netdev))
+ reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;
+
+ flow_act.pkt_reformat = mlx5_packet_reformat_alloc(mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (IS_ERR(flow_act.pkt_reformat)) {
+ err = PTR_ERR(flow_act.pkt_reformat);
+ mlx5_core_err(mdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
+ goto err;
+ }
+ tx_rule->pkt_reformat = flow_act.pkt_reformat;
+
+ err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id);
+ if (err) {
+ mlx5_core_err(mdev,
+ "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+
+ tx_rule->fs_id = *fs_id;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx_tables->ft_check;
+ rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
+ goto err;
+ }
+ tx_rule->rule = rule;
+
+ err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev,
+ &macsec_fs->sci_hash, attrs->sci, true);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
+ goto err;
+ }
+
+ goto out_spec;
+
+err:
+ macsec_fs_tx_del_rule(macsec_fs, tx_rule, macsec_ctx->secy->netdev);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+
+ return macsec_rule;
+}
+
+static void macsec_fs_tx_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *tx_tables;
+
+ if (!tx_fs)
+ return;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt) {
+ mlx5_core_err(mdev,
+ "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
+ tx_tables->refcnt);
+ return;
+ }
+
+ ida_destroy(&tx_fs->tx_halloc);
+
+ if (tx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
+ tx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (tx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+}
+
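+/* Create the Tx check-table counters (passed/dropped), the fs_id IDA
+ * and the MACsec devices list; the flow tables themselves are created
+ * on first SA add via macsec_fs_tx_ft_get().
+ */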
+static int macsec_fs_tx_init(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *tx_tables;
+ struct mlx5_macsec_tx *tx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ tx_tables = &tx_fs->tables;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+ tx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Tx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ tx_tables->check_miss_rule_counter = flow_counter;
+
+ ida_init(&tx_fs->tx_halloc);
+ INIT_LIST_HEAD(&macsec_fs->macsec_devices_list);
+
+ macsec_fs->tx_fs = tx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_roce_miss_destroy(struct mlx5_macsec_miss *miss)
+{
+ mlx5_del_flow_rules(miss->rule);
+ mlx5_destroy_flow_group(miss->g);
+}
+
+static void macsec_fs_rdma_rx_destroy(struct mlx5_macsec_rx_roce *roce, struct mlx5_core_dev *mdev)
+{
+ if (!mlx5_is_macsec_roce_supported(mdev))
+ return;
+
+ mlx5_del_flow_rules(roce->nic_miss.rule);
+ mlx5_del_flow_rules(roce->rule);
+ mlx5_modify_header_dealloc(mdev, roce->copy_modify_hdr);
+ mlx5_destroy_flow_group(roce->nic_miss.g);
+ mlx5_destroy_flow_group(roce->g);
+ mlx5_destroy_flow_table(roce->ft);
+
+ macsec_fs_rx_roce_miss_destroy(&roce->miss);
+ mlx5_destroy_flow_table(roce->ft_macsec_op_check);
+ mlx5_destroy_flow_table(roce->ft_ip_check);
+}
+
+static void macsec_fs_rx_destroy(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_macsec_tables *rx_tables;
+ int i;
+
+ /* Rx check table */
+ for (i = 1; i >= 0; --i) {
+ if (rx_fs->check_rule[i]) {
+ mlx5_del_flow_rules(rx_fs->check_rule[i]);
+ rx_fs->check_rule[i] = NULL;
+ }
+
+ if (rx_fs->check_rule_pkt_reformat[i]) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev,
+ rx_fs->check_rule_pkt_reformat[i]);
+ rx_fs->check_rule_pkt_reformat[i] = NULL;
+ }
+ }
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->check_miss_rule);
+ rx_tables->check_miss_rule = NULL;
+ }
+
+ if (rx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(rx_tables->ft_check_group);
+ rx_tables->ft_check_group = NULL;
+ }
+
+ if (rx_tables->ft_check) {
+ mlx5_destroy_flow_table(rx_tables->ft_check);
+ rx_tables->ft_check = NULL;
+ }
+
+ /* Rx crypto table */
+ if (rx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
+ rx_tables->crypto_miss_rule = NULL;
+ }
+
+ macsec_fs_destroy_flow_table(&rx_tables->ft_crypto);
+
+ macsec_fs_rdma_rx_destroy(&macsec_fs->rx_fs->roce, macsec_fs->mdev);
+}
+
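+/* The Rx crypto table is split into three flow groups, matched in
+ * order: SA rules that carry an explicit SCI in the SecTag, SA rules
+ * without an SCI (matched by source MAC instead), and a final
+ * catch-all group holding the default miss rule.
+ */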
+static int macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow group for SA rule with SCI */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow group for SA rule without SCI */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
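+/* Two decap rules live in the check table: one for SecTags carrying an
+ * SCI (16-byte header) and one for SecTags without it (8-byte header).
+ * Both verify clean MACsec and ASO syndromes before removing the
+ * SecTag and counting the packet.
+ */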
+static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec,
+ int reformat_param_size)
+{
+ int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
+ u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_destination roce_dest[2];
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_flow_handle *rule;
+ int err = 0, dstn = 0;
+
+ rx_tables = &rx_fs->tables;
+
+ /* Rx check table decap 16B rule */
+ memset(dest, 0, sizeof(*dest));
+ memset(flow_act, 0, sizeof(*flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
+ reformat_params.size = reformat_param_size;
+ reformat_params.data = mlx5_reformat_buf;
+ flow_act->pkt_reformat = mlx5_packet_reformat_alloc(mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (IS_ERR(flow_act->pkt_reformat)) {
+ err = PTR_ERR(flow_act->pkt_reformat);
+ mlx5_core_err(mdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
+ return err;
+ }
+ rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ /* MACsec syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
+ /* ASO return reg syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+ /* SecTag TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ flow_act->flags = FLOW_ACT_NO_APPEND;
+
+ if (rx_fs->roce.ft) {
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ roce_dest[dstn].ft = rx_fs->roce.ft;
+ dstn++;
+ } else {
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ }
+
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
+ return err;
+ }
+
+ rx_fs->check_rule[rule_index] = rule;
+
+ return 0;
+}
+
+static int macsec_fs_rx_roce_miss_create(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_handle *rule;
+ u32 *flow_group_in;
+ int err;
+
+ flow_group_in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* The IP check FT needs no miss rule: its default miss action already forwards to the next prio */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ roce->ft_macsec_op_check->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ roce->ft_macsec_op_check->max_fte - 1);
+ flow_group = mlx5_create_flow_group(roce->ft_macsec_op_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ mlx5_core_err(mdev,
+ "Failed to create miss flow group for MACsec RoCE operation check table err(%d)\n",
+ err);
+ goto err_macsec_op_miss_group;
+ }
+ roce->miss.g = flow_group;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ rule = mlx5_add_flow_rules(roce->ft_macsec_op_check, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE operation check table err(%d)\n",
+ err);
+ goto err_macsec_op_rule;
+ }
+ roce->miss.rule = rule;
+
+ kvfree(flow_group_in);
+ return 0;
+
+err_macsec_op_rule:
+ mlx5_destroy_flow_group(roce->miss.g);
+err_macsec_op_miss_group:
+ kvfree(flow_group_in);
+ return err;
+}
+
+#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
+
+static int macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ struct mlx5_flow_group *g;
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Failed to create main flow group for MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_udp_group;
+ }
+ roce->g = g;
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Failed to create miss flow group for MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_udp_miss_group;
+ }
+ roce->nic_miss.g = g;
+
+ kvfree(in);
+ return 0;
+
+err_udp_miss_group:
+ mlx5_destroy_flow_group(roce->g);
+err_udp_group:
+ kvfree(in);
+ return err;
+}
+
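+/* Match RoCE v2 traffic (UDP dport 4791), copy the MACsec metadata
+ * from reg_b into reg_c_5 so it survives into the RDMA domain, and
+ * jump to the IP check table; anything else misses to the next prio.
+ */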
+static int macsec_fs_rx_roce_jump_to_rdma_rules_create(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_destination dst = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ROCE_V2_UDP_DPORT);
+
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 32);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5);
+ MLX5_SET(copy_action_in, action, dst_offset, 0);
+
+ modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "Failed to alloc macsec copy modify_header_id err(%d)\n", err);
+ goto err_alloc_hdr;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.modify_hdr = modify_hdr;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_ip_check;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add rule to MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_add_rule;
+ }
+ roce->rule = rule;
+ roce->copy_modify_hdr = modify_hdr;
+
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_add_rule2;
+ }
+ roce->nic_miss.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_rule2:
+ mlx5_del_flow_rules(roce->rule);
+err_add_rule:
+ mlx5_modify_header_dealloc(macsec_fs->mdev, modify_hdr);
+err_alloc_hdr:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ int err;
+
+ err = macsec_fs_rx_roce_jump_to_rdma_groups_create(macsec_fs->mdev, roce);
+ if (err)
+ return err;
+
+ err = macsec_fs_rx_roce_jump_to_rdma_rules_create(macsec_fs, roce);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5_destroy_flow_group(roce->nic_miss.g);
+ mlx5_destroy_flow_group(roce->g);
+ return err;
+}
+
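+/* Build the RoCE Rx chain: a NIC-domain table that redirects RoCE v2
+ * traffic into the RDMA domain, an IP check table matched on the
+ * destination IP, and an operation check table that only passes flows
+ * whose reg_c_5 metadata carries an expected fs_id (anything else is
+ * dropped).
+ */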
+static int macsec_fs_rx_roce_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) {
+ mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
+ return 0;
+ }
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_IP_TABLE_LEVEL,
+ CRYPTO_NUM_MAXSEC_FTE);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec IP check RoCE table err(%d)\n", err);
+ return err;
+ }
+ rx_fs->roce.ft_ip_check = ft;
+
+ ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL,
+ CRYPTO_NUM_MAXSEC_FTE);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec operation check RoCE table err(%d)\n",
+ err);
+ goto err_macsec_op;
+ }
+ rx_fs->roce.ft_macsec_op_check = ft;
+
+ err = macsec_fs_rx_roce_miss_create(mdev, &rx_fs->roce);
+ if (err)
+ goto err_miss_create;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ goto err_ns;
+ }
+
+ ft_attr.level = RX_ROCE_TABLE_LEVEL;
+ ft_attr.max_fte = RX_ROCE_TABLE_NUM_FTE;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec jump to RX RoCE, NIC table err(%d)\n", err);
+ goto err_ns;
+ }
+ rx_fs->roce.ft = ft;
+
+ err = macsec_fs_rx_roce_jump_to_rdma_create(macsec_fs, &rx_fs->roce);
+ if (err)
+ goto err_udp_ft;
+
+ return 0;
+
+err_udp_ft:
+ mlx5_destroy_flow_table(rx_fs->roce.ft);
+err_ns:
+ macsec_fs_rx_roce_miss_destroy(&rx_fs->roce.miss);
+err_miss_create:
+ mlx5_destroy_flow_table(rx_fs->roce.ft_macsec_op_check);
+err_macsec_op:
+ mlx5_destroy_flow_table(rx_fs->roce.ft_ip_check);
+ return err;
+}
+
+static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_flow_table *ft_crypto;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto free_spec;
+ }
+
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ err = macsec_fs_rx_roce_create(macsec_fs);
+ if (err)
+ goto out_flow_group;
+
+ /* Rx crypto table */
+ ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
+ goto err;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Rx crypto table groups */
+ err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add MACsec Rx crypto table default miss rule %d\n",
+ err);
+ goto err;
+ }
+ rx_tables->crypto_miss_rule = rule;
+
+ /* Rx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns,
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
+ RX_CHECK_TABLE_LEVEL,
+ RX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Fail to create MACsec RX check table, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->ft_check = flow_table;
+
+ /* Rx check table Default miss group/rule */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Rx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ rx_tables->ft_check_group = flow_group;
+
+ /* Rx check table default drop rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->check_miss_rule = rule;
+
+ /* Rx check table decap rules */
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
+ if (err)
+ goto err;
+
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
+ if (err)
+ goto err;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_rx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+free_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_ft_get(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ int err = 0;
+
+ if (rx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_rx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ rx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_rx_ft_put(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+
+ if (--rx_tables->refcnt)
+ return;
+
+ macsec_fs_rx_destroy(macsec_fs);
+}
+
+static void macsec_fs_rx_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_rx_rule *rx_rule,
+ void *macdev, u32 fs_id)
+{
+ int i;
+
+ macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev,
+ &macsec_fs->fs_id_hash, false);
+
+ for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
+ if (rx_rule->rule[i]) {
+ mlx5_del_flow_rules(rx_rule->rule[i]);
+ rx_rule->rule[i] = NULL;
+ }
+ }
+
+ if (rx_rule->meta_modhdr) {
+ mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
+ rx_rule->meta_modhdr = NULL;
+ }
+
+ kfree(rx_rule);
+
+ macsec_fs_rx_ft_put(macsec_fs);
+}
+
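+/* Build the Rx crypto-table match: MACsec ethertype plus the SecTag
+ * TCI/AN bits, and either the explicit 64-bit SCI (tag_2/tag_3) or,
+ * when no SCI is present, the source MAC address that implicitly
+ * forms it.
+ */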
+static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_macsec_rule_attrs *attrs,
+ bool sci_present)
+{
+ u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
+ struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
+ __be32 *sci_p = (__be32 *)(&attrs->sci);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* MACsec ethertype */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ /* SecTag AN + TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (sci_present) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
+ be32_to_cpu(sci_p[0]));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_3);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
+ be32_to_cpu(sci_p[1]));
+ } else {
+ /* When the SCI isn't present in the SecTag, the source MAC address
+ * is matched instead; this is valid only when the SCI is built from
+ * the default MACsec port.
+ */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
+ sci_p, ETH_ALEN);
+ }
+
+ crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ crypto_params->obj_id = attrs->macsec_obj_id;
+}
+
+static union mlx5_macsec_rule *
+macsec_fs_rx_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 fs_id)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ union mlx5_macsec_rule *macsec_rule = NULL;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_macsec_flow_table *ft_crypto;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_rx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_rx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ rx_rule = &macsec_rule->rx_rule;
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Set bits[31:30] to the MACsec marker (0x1) and bits[15:0] to the fs_id */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "Fail to alloc MACsec set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto err;
+ }
+ rx_rule->meta_modhdr = modify_hdr;
+
+ /* Rx crypto table with SCI rule */
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[0] = rule;
+
+ /* Rx crypto table without SCI rule */
+ if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[1] = rule;
+ }
+
+ err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev,
+ &macsec_fs->fs_id_hash, attrs->sci, false);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
+ goto err;
+ }
+
+ kvfree(spec);
+ return macsec_rule;
+
+err:
+ macsec_fs_rx_del_rule(macsec_fs, rx_rule, macsec_ctx->secy->netdev, fs_id);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+ return macsec_rule;
+}
+
+static int macsec_fs_rx_init(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_macsec_rx *rx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
+ if (!rx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+
+ rx_tables = &rx_fs->tables;
+ rx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Rx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ rx_tables->check_miss_rule_counter = flow_counter;
+
+ macsec_fs->rx_fs = rx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *rx_tables;
+
+ if (!rx_fs)
+ return;
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->refcnt) {
+ mlx5_core_err(mdev,
+ "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
+ rx_tables->refcnt);
+ return;
+ }
+
+ if (rx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
+ rx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (rx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+}
+
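+/* Helpers to translate a sockaddr into a flow-spec IP match; used for
+ * the RoCE IP check rules (destination IP on Rx) and the RDMA Tx
+ * rules (source IP).
+ */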
+static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip)
+{
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, MLX5_FS_IPV4_VERSION);
+
+ if (is_dst_ip) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &in->sin_addr.s_addr, 4);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &in->sin_addr.s_addr, 4);
+ }
+}
+
+static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec,
+ bool is_dst_ip)
+{
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, MLX5_FS_IPV6_VERSION);
+
+ if (is_dst_ip) {
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &in6->sin6_addr, 16);
+ } else {
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &in6->sin6_addr, 16);
+ }
+}
+
+static void set_ipaddr_spec(const struct sockaddr *addr,
+ struct mlx5_flow_spec *spec, bool is_dst_ip)
+{
+ struct sockaddr_in6 *in6;
+
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *in = (struct sockaddr_in *)addr;
+
+ set_ipaddr_spec_v4(in, spec, is_dst_ip);
+ return;
+ }
+
+ in6 = (struct sockaddr_in6 *)addr;
+ set_ipaddr_spec_v6(in6, spec, is_dst_ip);
+}
+
+static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule)
+{
+ mlx5_del_flow_rules(rx_rule->op);
+ mlx5_del_flow_rules(rx_rule->ip);
+ list_del(&rx_rule->entry);
+ kfree(rx_rule);
+}
+
+static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
+ struct list_head *rx_rules_list)
+{
+ struct mlx5_roce_macsec_rx_rule *rx_rule, *next;
+
+ if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
+ return;
+
+ list_for_each_entry_safe(rx_rule, next, rx_rules_list, entry) {
+ if (rx_rule->fs_id == fs_id)
+ macsec_fs_del_roce_rule_rx(rx_rule);
+ }
+}
+
+static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev,
+ struct mlx5_roce_macsec_tx_rule *tx_rule)
+{
+ mlx5_del_flow_rules(tx_rule->rule);
+ mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
+ list_del(&tx_rule->entry);
+ kfree(tx_rule);
+}
+
+static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
+ struct list_head *tx_rules_list)
+{
+ struct mlx5_roce_macsec_tx_rule *tx_rule, *next;
+
+ if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
+ return;
+
+ list_for_each_entry_safe(tx_rule, next, tx_rules_list, entry) {
+ if (tx_rule->fs_id == fs_id)
+ macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
+ }
+}
+
+void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats)
+{
+ struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats;
+ struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+ struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+
+ if (tx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_rule_counter,
+ &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
+
+ if (tx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
+ &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
+
+ if (rx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_rule_counter,
+ &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
+
+ if (rx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
+ &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
+}
+
+struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs)
+{
+ if (!macsec_fs)
+ return NULL;
+
+ return &macsec_fs->stats;
+}
+
+u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci)
+{
+ struct mlx5_fs_id *mlx5_fs_id;
+ u32 fs_id = 0;
+
+ rcu_read_lock();
+ mlx5_fs_id = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci);
+ if (mlx5_fs_id)
+ fs_id = mlx5_fs_id->id;
+ rcu_read_unlock();
+
+ return fs_id;
+}
+
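+/* Entry points shared by the netdev and RDMA paths: adding or deleting
+ * an SA installs/removes the steering rule and notifies listeners
+ * (e.g. mlx5_ib) so RoCE rules can be kept in sync.
+ */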
+union mlx5_macsec_rule *
+mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ struct mlx5_macsec_event_data data = {
+ .macsec_fs = macsec_fs,
+ .macdev = macsec_ctx->secy->netdev,
+ .is_tx = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT),
+ };
+ union mlx5_macsec_rule *macsec_rule;
+ u32 tx_new_fs_id;
+
+ macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) :
+ macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+
+ if (macsec_rule) {
+ /* tx_new_fs_id is only valid when the Tx rule was created */
+ data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id;
+ blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ &data);
+ }
+
+ return macsec_rule;
+}
+
+void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ union mlx5_macsec_rule *macsec_rule,
+ int action, void *macdev, u32 sa_fs_id)
+{
+ struct mlx5_macsec_event_data data = {
+ .macsec_fs = macsec_fs,
+ .macdev = macdev,
+ .is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT),
+ };
+
+ data.fs_id = (data.is_tx) ? macsec_rule->tx_rule.fs_id : sa_fs_id;
+ blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ &data);
+
+ if (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT)
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev);
+ else
+ macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id);
+}
+
+static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
+ const struct sockaddr *addr,
+ struct list_head *rx_rules_list)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_roce_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *new_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ rx_rule = kzalloc(sizeof(*rx_rule), GFP_KERNEL);
+ if (!rx_rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ set_ipaddr_spec(addr, spec, true);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.ft = rx_fs->roce.ft_macsec_op_check;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act,
+ &dest, 1);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ goto ip_rule_err;
+ }
+ rx_rule->ip = new_rule;
+
+ memset(&flow_act, 0, sizeof(flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5,
+ macsec_fs_set_rx_fs_id(fs_id));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act,
+ NULL, 0);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ goto op_rule_err;
+ }
+ rx_rule->op = new_rule;
+ rx_rule->gid_idx = gid_idx;
+ rx_rule->fs_id = fs_id;
+ list_add_tail(&rx_rule->entry, rx_rules_list);
+
+ goto out;
+
+op_rule_err:
+ mlx5_del_flow_rules(rx_rule->ip);
+ rx_rule->ip = NULL;
+ip_rule_err:
+ kfree(rx_rule);
+out:
+ kvfree(spec);
+ return err;
+}
+
+static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
+ const struct sockaddr *addr,
+ struct list_head *tx_rules_list)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_roce_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *new_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ tx_rule = kzalloc(sizeof(*tx_rule), GFP_KERNEL);
+ if (!tx_rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ set_ipaddr_spec(addr, spec, false);
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
+ MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "Fail to alloc ROCE MACsec set modify_header_id err=%d\n",
+ err);
+ modify_hdr = NULL;
+ goto modify_hdr_err;
+ }
+ tx_rule->meta_modhdr = modify_hdr;
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dest.ft = tx_fs->tables.ft_crypto.t;
+ new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err);
+ goto rule_err;
+ }
+ tx_rule->rule = new_rule;
+ tx_rule->gid_idx = gid_idx;
+ tx_rule->fs_id = fs_id;
+ list_add_tail(&tx_rule->entry, tx_rules_list);
+
+ goto out;
+
+rule_err:
+ mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
+modify_hdr_err:
+ kfree(tx_rule);
+out:
+ kvfree(spec);
+ return err;
+}
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list)
+{
+ struct mlx5_roce_macsec_rx_rule *rx_rule, *next_rx;
+ struct mlx5_roce_macsec_tx_rule *tx_rule, *next_tx;
+
+ list_for_each_entry_safe(tx_rule, next_tx, tx_rules_list, entry) {
+ if (tx_rule->gid_idx == gid_idx)
+ macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
+ }
+
+ list_for_each_entry_safe(rx_rule, next_rx, rx_rules_list, entry) {
+ if (rx_rule->gid_idx == gid_idx)
+ macsec_fs_del_roce_rule_rx(rx_rule);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule);
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_fs_id *fs_id_iter;
+ unsigned long index = 0;
+ int err;
+
+ list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (!macsec_device)
+ return 0;
+
+ xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) {
+ err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr,
+ tx_rules_list);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
+ goto out;
+ }
+ }
+
+ index = 0;
+ xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) {
+ err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr,
+ rx_rules_list);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx)
+{
+ if (is_tx)
+ mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr,
+ tx_rules_list);
+ else
+ mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr,
+ rx_rules_list);
+EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx)
+{
+ if (is_tx)
+ macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list);
+ else
+ macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list);
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules);
+
+void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ rhashtable_destroy(&macsec_fs->fs_id_hash);
+ rhashtable_destroy(&macsec_fs->sci_hash);
+ kfree(macsec_fs);
+}
+
+struct mlx5_macsec_fs *
+mlx5_macsec_fs_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+
+ err = rhashtable_init(&macsec_fs->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = rhashtable_init(&macsec_fs->fs_id_hash, &rhash_fs_id);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init FS_ID hash table, err=%d\n",
+ err);
+ goto sci_hash_cleanup;
+ }
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto fs_id_hash_cleanup;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh);
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+fs_id_hash_cleanup:
+ rhashtable_destroy(&macsec_fs->fs_id_hash);
+sci_hash_cleanup:
+ rhashtable_destroy(&macsec_fs->sci_hash);
+err_hash:
+ kfree(macsec_fs);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
new file mode 100644
index 000000000000..34b80c3ef6a5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_MACSEC_STEERING_H__
+#define __MLX5_MACSEC_STEERING_H__
+
+#ifdef CONFIG_MLX5_MACSEC
+
+/* Bits[31:30]: MACsec marker, bits[15:0]: MACsec fs_id */
+#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
+#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
+#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
+#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
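+/* Example: metadata 0x4000002a has marker 0x1 (MACsec) and fs_id 0x2a */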
+
+#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
+
+struct mlx5_macsec_fs;
+union mlx5_macsec_rule;
+
+struct mlx5_macsec_rule_attrs {
+ sci_t sci;
+ u32 macsec_obj_id;
+ u8 assoc_num;
+ int action;
+};
+
+struct mlx5_macsec_stats {
+ u64 macsec_rx_pkts;
+ u64 macsec_rx_bytes;
+ u64 macsec_rx_pkts_drop;
+ u64 macsec_rx_bytes_drop;
+ u64 macsec_tx_pkts;
+ u64 macsec_tx_bytes;
+ u64 macsec_tx_pkts_drop;
+ u64 macsec_tx_bytes_drop;
+};
+
+enum mlx5_macsec_action {
+ MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
+};
+
+void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs);
+
+struct mlx5_macsec_fs *
+mlx5_macsec_fs_init(struct mlx5_core_dev *mdev);
+
+union mlx5_macsec_rule *
+mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id);
+
+void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ union mlx5_macsec_rule *macsec_rule,
+ int action, void *macdev, u32 sa_fs_id);
+
+void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats);
+struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs);
+u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci);
+
+#endif /* CONFIG_MLX5_MACSEC */
+
+#endif /* __MLX5_MACSEC_STEERING_H__ */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ae60817ec5c2..c3f30663070f 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -4229,6 +4229,18 @@ static struct net *macsec_get_link_net(const struct net_device *dev)
return dev_net(macsec_priv(dev)->real_dev);
}
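+/* Helpers for offloading drivers: resolve the real device underneath a
+ * MACsec netdevice and query whether that MACsec device is offloaded.
+ */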
+struct net_device *macsec_get_real_dev(const struct net_device *dev)
+{
+ return macsec_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL_GPL(macsec_get_real_dev);
+
+bool macsec_netdev_is_offloaded(struct net_device *dev)
+{
+ return macsec_is_offloaded(macsec_priv(dev));
+}
+EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);
+
static size_t macsec_get_size(const struct net_device *dev)
{
return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 93399802ba77..4d5be378fa8c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -364,6 +364,8 @@ enum mlx5_event {
enum mlx5_driver_event {
MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index c9d82e74daaa..e95f10066eac 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -727,7 +727,6 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
-struct mlx5_thermal;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -809,6 +808,11 @@ struct mlx5_core_dev {
struct mlx5_hwmon *hwmon;
u64 num_block_tc;
u64 num_block_ipsec;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec_fs *macsec_fs;
+ /* MACsec notifier chain to sync MACsec core and IB database */
+ struct blocking_notifier_head macsec_nh;
+#endif
};
struct mlx5_db {
@@ -1322,6 +1326,52 @@ static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
return mlx5_is_roce_on(dev);
}
+#ifdef CONFIG_MLX5_MACSEC
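+/* MACsec offload requires the MACsec general object, DEK support,
+ * encrypt/decrypt flow-table reformat capabilities and at least one
+ * AES-GCM key size in each direction.
+ */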
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) ||
+ !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs)
+ return false;
+
+ return true;
+}
+#endif /* CONFIG_MLX5_MACSEC */
+
enum {
MLX5_OCTWORD = 16,
};
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index c302ec34255b..1e00c2436377 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -105,6 +105,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
};
enum {
diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h
new file mode 100644
index 000000000000..f7ff4c2a95d0
--- /dev/null
+++ b/include/linux/mlx5/macsec.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef MLX5_MACSEC_H
+#define MLX5_MACSEC_H
+
+#ifdef CONFIG_MLX5_MACSEC
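+/* Payload of the MLX5_DRIVER_EVENT_MACSEC_SA_{ADDED,DELETED} events,
+ * consumed by the mlx5_ib notifier to mirror SA state for RoCE.
+ */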
+struct mlx5_macsec_event_data {
+ struct mlx5_macsec_fs *macsec_fs;
+ void *macdev;
+ u32 fs_id;
+ bool is_tx;
+};
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs);
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx);
+
+#endif /* CONFIG_MLX5_MACSEC */
+#endif /* MLX5_MACSEC_H */
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 441ed8fd4b5f..75a6f4863c83 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -312,6 +312,8 @@ static inline bool macsec_send_sci(const struct macsec_secy *secy)
return tx_sc->send_sci ||
(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
+struct net_device *macsec_get_real_dev(const struct net_device *dev);
+bool macsec_netdev_is_offloaded(struct net_device *dev);
static inline void *macsec_netdev_priv(const struct net_device *dev)
{