Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile        |    1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c         |    3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c  | 1668
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c   |   60
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h   |  131
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c    |  300
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c  |   16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c       |   84
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c      |  103
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h   |   11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c     |  110
11 files changed, 2428 insertions, 59 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index d463dc72d80a..73fdb8798614 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index fd4f083c699e..826f691de259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000000..64f3acd7f67b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include <net/macsec.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB 0x10 /* epon */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+ }
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
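To make the bit packing above concrete, here is a minimal userspace sketch of the RX SecY policy word that cn10k_mcs_write_rx_secy() builds. GENMASK_ULL() and FIELD_PREP() are re-implemented locally so it compiles outside the kernel, and the replay-window and validate-frames values are illustrative assumptions, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP() helpers */
#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define MCS_RX_SECY_PLCY_RW_MASK  GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP       (1ULL << 17)
#define MCS_RX_SECY_PLCY_AUTH_ENA (1ULL << 16)
#define MCS_RX_SECY_PLCY_CIP      GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL      GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA      (1ULL << 0)

int main(void)
{
	uint64_t policy;

	policy  = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, 64); /* replay window (assumed) */
	policy |= MCS_RX_SECY_PLCY_RP;                      /* replay protection on */
	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, 0);      /* 0 == MCS_GCM_AES_128 */
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, 2);      /* 2 == validate strict */
	policy |= MCS_RX_SECY_PLCY_ENA;

	printf("rx secy policy = 0x%016llx\n", (unsigned long long)policy);
	return 0;
}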
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[0] = ~0ULL;
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
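The mask handling above is easy to misread: judging from how every mask word is initialized to ~0ULL and then only the ethertype field is cleared, set mask bits appear to mean "don't care". A small userspace sketch of that inferred convention follows; the tcam_match() helper is hypothetical, not a driver function.

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
#define ETH_P_MACSEC 0x88E5

/* Hypothetical TCAM compare: bits set in mask are ignored */
static int tcam_match(uint64_t key, uint64_t data, uint64_t mask)
{
	return ((key ^ data) & ~mask) == 0;
}

int main(void)
{
	uint64_t data = (uint64_t)ETH_P_MACSEC << 32;
	uint64_t mask = ~0ULL & ~MCS_TCAM1_ETYPE_MASK; /* only ethertype significant */

	printf("macsec frame matches: %d\n",
	       tcam_match((uint64_t)ETH_P_MACSEC << 32, data, mask)); /* prints 1 */
	printf("ipv4 frame matches:   %d\n",
	       tcam_match((uint64_t)0x0800 << 32, data, mask));       /* prints 0 */
	return 0;
}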
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ unsigned char *src = rxsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg],
+ (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
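The copy loop above packs the SA key into the 64-bit policy words eight bytes at a time. A standalone sketch of the same loop, using an illustrative 16-byte (GCM-AES-128) key rather than real key material:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t key[16] = { /* illustrative key bytes, not a real key */
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	uint64_t plcy[9] = { 0 };
	unsigned int reg, key_len;

	/* Pack the key into 64-bit registers, 8 bytes per register */
	for (reg = 0, key_len = 0; key_len < sizeof(key); key_len += 8) {
		memcpy(&plcy[reg], key + reg * 8, 8);
		reg++;
	}

	printf("plcy[0]=0x%016llx plcy[1]=0x%016llx\n",
	       (unsigned long long)plcy[0], (unsigned long long)plcy[1]);
	return 0;
}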
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+ /* Insert SecTag after 12 bytes (DA + SA) */
+ u8 tag_offset = 12;
+ u8 sectag_tci = 0;
+ u64 policy;
+ int ret;
+
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ /* Write SecTag excluding AN bits(1..0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+ /* If the encoding SA does not exist or is inactive and
+ * protect_frames is not set, then frames are sent out unmodified.
+ * Hence enable the policy irrespective of the SecY's operational
+ * state when protect_frames is off.
+ */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
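A quick userspace sketch of the TCI composition above, for the common case of an SC that encrypts and carries the SCI; the MCS_TCI_* values mirror the defines at the top of this file, and the final shift drops the AN bits exactly as the policy write does.

#include <stdint.h>
#include <stdio.h>

#define MCS_TCI_SC 0x20 /* SCI present */
#define MCS_TCI_E  0x08 /* encryption */
#define MCS_TCI_C  0x04 /* changed text */

int main(void)
{
	uint8_t sectag_tci = 0;

	sectag_tci |= MCS_TCI_SC;            /* send_sci is set */
	sectag_tci |= MCS_TCI_E | MCS_TCI_C; /* encrypt is set */

	/* The policy stores the TCI without the AN bits (1..0) */
	printf("tci=0x%02x st_tci=0x%02x\n", sectag_tci, sectag_tci >> 2);
	return 0;
}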
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+ /* This can be enabled since the stack transmits packets only when the interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
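Because each flow-id TCAM word is only 64 bits, the 48-bit source MAC is split: its low 16 bits land in data[0] bits 63:48 and its high 32 bits in data[1] bits 31:0. A sketch with an illustrative address (02:00:00:00:00:01, encoded the way ether_addr_to_u64() would):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)

int main(void)
{
	/* ether_addr_to_u64() of 02:00:00:00:00:01 (illustrative) */
	uint64_t mac_sa = 0x020000000001ULL;

	printf("data[0] = 0x%016llx\n", (unsigned long long)
	       FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa));       /* low 16 bits */
	printf("data[1] = 0x%016llx\n", (unsigned long long)
	       FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16)); /* high 32 bits */
	return 0;
}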
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ /* Out of all the SAs, link only the encoding_sa to the SC */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ unsigned char *src = txsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ /* Each SecY needs one TX secy and one RX secy hardware resource */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free the Tx SC's resources, and those of its SAs (if any), back to the AF
+ */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
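The teardown above walks the SA bitmap rather than probing all four slots; a minimal sketch of the same walk with an assumed bitmap value:

#include <stdio.h>

int main(void)
{
	unsigned char sa_bmap = 0x5; /* SAs 0 and 2 in use (assumed) */
	unsigned int sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1)
			printf("free hardware SA slot %u\n", sa_num);
		sa_num++;
		sa_bmap >>= 1;
	}
	return 0;
}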
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free the Rx SC's resources, and those of its SAs (if any), back to the AF
+ */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+ /* When updating the secy, update the RX secy policy as well */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn_halves.lower);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ /* Some stats are backed by shared hardware counters, so when
+ * updating the secy policy, take a snapshot of the current stats
+ * and reset the counters. The stats below are affected by these
+ * shared counters.
+ */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->protect_frames == txsc->last_protect_frames)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_protect_frames)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+}
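The pattern above is worth spelling out: one hardware counter can feed two different MIB stats depending on the policy in force, so the driver snapshots and clears it on every policy change and attributes the delta according to the old policy. A sketch of that idea, where hw_read_clear_untagged() is a made-up stand-in for the mailbox stats read with clear == true:

#include <stdint.h>
#include <stdio.h>

struct sw_stats { uint64_t InPktsNoTag, InPktsUntagged; };

/* Made-up stand-in for the mailbox stats read with clear == true */
static uint64_t hw_read_clear_untagged(void)
{
	static uint64_t cnt = 17; /* pretend 17 untagged packets arrived */
	uint64_t v = cnt;

	cnt = 0;
	return v;
}

int main(void)
{
	struct sw_stats stats = { 0, 0 };
	int last_validate_strict = 1; /* old policy (assumed) */

	/* On a policy change, attribute the delta per the *old* policy */
	if (last_validate_strict)
		stats.InPktsNoTag += hw_read_clear_untagged();
	else
		stats.InPktsUntagged += hw_read_clear_untagged();

	printf("InPktsNoTag=%llu InPktsUntagged=%llu\n",
	       (unsigned long long)stats.InPktsNoTag,
	       (unsigned long long)stats.InPktsUntagged);
	return 0;
}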
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+ /* Stick to a 16-byte key length until XPN support is added */
+ if (secy->key_len != 16)
+ return -EOPNOTSUPP;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->protect_frames)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
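For reference, a standalone sketch of the expired-SA search above, with hypothetical SC and SA ids; the real driver walks cfg->txsc_list and then resolves the software SA via RCU.

#include <stdint.h>
#include <stdio.h>

#define SA_PER_SC 4

struct txsc { uint16_t hw_sa_id[SA_PER_SC]; };

int main(void)
{
	struct txsc scs[2] = { { { 10, 11, 12, 13 } },
			       { { 20, 21, 22, 23 } } };
	uint16_t event_sa_id = 21; /* from the interrupt (assumed) */
	unsigned int i, an;

	/* Scan every TX SC's hardware SA ids for the reported one */
	for (i = 0; i < 2; i++)
		for (an = 0; an < SA_PER_SC; an++)
			if (scs[i].hw_sa_id[an] == event_sa_id)
				printf("SC %u, AN %u: PN wrapped\n", i, an);
	return 0;
}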
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index d686c7b6252f..9ac9e6615ae7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -586,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
@@ -602,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->lvl = lvl;
req->num_regs = 1;
- schq = hw->txschq_list[lvl][0];
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -611,7 +618,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
(0x2ULL << 36);
req->num_regs++;
/* MDQ config */
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
req->regval[1] = parent << 16;
req->num_regs++;
@@ -619,14 +626,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
@@ -635,11 +642,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure,
+ * and set the relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
@@ -650,8 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure,
+ * and set the relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
@@ -676,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
@@ -806,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
@@ -1792,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index b28029cc4316..282db6fe3b08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,6 +19,7 @@
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@@ -33,6 +34,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -40,6 +42,11 @@
#define NAME_SIZE 32
+#ifdef CONFIG_DCB
+/* Max priority supported for PFC */
+#define NIX_PF_PFC_PRIO_MAX 8
+#endif
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -196,7 +203,7 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
- u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -238,6 +245,8 @@ struct otx2_hw {
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+#define CN10K_HW_MACSEC 5
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -271,6 +280,13 @@ struct refill_work {
struct otx2_nic *pf;
};
+/* PTPv2 originTimestamp structure */
+struct ptpv2_tstamp {
+ __be16 seconds_msb; /* 16 bits + */
+ __be32 seconds_lsb; /* 32 bits = 48 bits */
+ __be32 nanoseconds;
+} __packed;
+
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -286,6 +302,9 @@ struct otx2_ptp {
struct ptp_pin_desc extts_config;
u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+ u32 base_ns;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -335,6 +354,66 @@ struct dev_hw_ops {
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
+#define CN10K_MCS_SA_PER_SC 4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+ u64 InPktsUntagged;
+ u64 InPktsNoTag;
+ u64 InPktsBadTag;
+ u64 InPktsUnknownSCI;
+ u64 InPktsNoSCI;
+ u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+ u64 InOctetsValidated;
+ u64 InOctetsDecrypted;
+ u64 InPktsUnchecked;
+ u64 InPktsDelayed;
+ u64 InPktsOK;
+ u64 InPktsInvalid;
+ u64 InPktsLate;
+ u64 InPktsNotValid;
+ u64 InPktsNotUsingSA;
+ u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+ struct macsec_secy *sw_secy;
+ struct cn10k_txsc_stats stats;
+ struct list_head entry;
+ enum macsec_validation_type last_validate_frames;
+ bool last_protect_frames;
+ u16 hw_secy_id_tx;
+ u16 hw_secy_id_rx;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+ struct macsec_secy *sw_secy;
+ struct macsec_rx_sc *sw_rxsc;
+ struct cn10k_rxsc_stats stats;
+ struct list_head entry;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+ struct list_head txsc_list;
+ struct list_head rxsc_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -358,6 +437,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -415,10 +495,16 @@ struct otx2_nic {
/* PFC */
u8 pfc_en;
u8 *queue_to_pfc_map;
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
/* napi event count. It is needed for adaptive irq coalescing. */
u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct cn10k_mcs_cfg *macsec_cfg;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -458,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -487,7 +578,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
+
+ if (is_dev_cn10kb(pfvf->pdev))
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -743,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@@ -785,6 +881,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
dir, DMA_ATTR_SKIP_CPU_SYNC);
}
+static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+{
+#ifdef CONFIG_DCB
+ if (pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+#endif
+
+ return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+}
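A small sketch of the SQ-to-SMQ selection implemented by otx2_get_smq_idx() above: a queue whose index has a PFC scheduler tree allocated uses that tree's SMQ, while every other queue shares the default SMQ at index 0. The scheduler ids below are illustrative, not real hardware values.

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

int main(void)
{
	/* Illustrative scheduler ids, not real hardware values */
	bool pfc_alloc_status[NUM_QUEUES] = { false, true, false, false };
	unsigned short pfc_smq[NUM_QUEUES] = { 0, 37, 0, 0 };
	unsigned short default_smq = 32;
	unsigned int qidx;

	for (qidx = 0; qidx < NUM_QUEUES; qidx++)
		printf("SQ %u -> SMQ %u\n", qidx,
		       pfc_alloc_status[qidx] ? pfc_smq[qidx] : default_smq);
	return 0;
}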
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -807,7 +913,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
@@ -888,6 +994,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -907,5 +1015,24 @@ void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
+/* PFC support */
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 723d2506d309..ccaf97bb1ce0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -7,6 +7,289 @@
#include "otx2_common.h"
+static int otx2_check_pfc_config(struct otx2_nic *pfvf)
+{
+ u8 tx_queues = pfvf->hw.tx_queues, prio;
+ u8 pfc_en = pfvf->pfc_en;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ if ((pfc_en & (1 << prio)) &&
+ prio > tx_queues - 1) {
+ dev_warn(pfvf->dev,
+ "Increase number of tx queues from %d to %d to support PFC.\n",
+ tx_queues, prio + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, lvl, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* Skip if the PFC bit is not set or no Tx scheduler
+ * was allocated for this priority
+ */
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Configure the schedulers (TLs) for this priority */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pfvf, lvl, prio, true);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
+ __func__, lvl, prio);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level up to the configured link
+ * config level. The remaining levels can reuse the
+ * schedulers in hw.txschq_list, as those are common to
+ * all priorities.
+ */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ req->schq[lvl] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ if (!rsp->schq[lvl])
+ return -ENOSPC;
+
+ pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
+ }
+
+ /* For the remaining levels, reuse the Tx schedulers from
+ * hw.txschq_list; those are common to all priorities.
+ */
+ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];
+
+ pfvf->pfc_alloc_status[prio] = true;
+ return 0;
+}
+
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+{
+ u8 pfc_en = pfvf->pfc_en;
+ u8 pfc_bit_set;
+ int err, prio;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_free_req *free_req;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* free PFC TLx nodes */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ pfvf->pfc_alloc_status[prio] = false;
+ return 0;
+}
+
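+/* Rebind SQ 'prio' to its new SMQ (the per-priority PFC SMQ, or the
+ * default one) via the admin queue, with the Tx queue(s) stopped for
+ * the duration of the update.
+ */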
+static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct net_device *dev = pfvf->netdev;
+ bool if_up = netif_running(dev);
+ struct nix_aq_enq_req *sq_aq;
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_stop_all_queues(pfvf->netdev);
+ else
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ cn10k_sq_aq->qidx = prio;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ cn10k_sq_aq->sq.ena = 1;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
+ cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ sq_aq->qidx = prio;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ sq_aq->sq.ena = 1;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->sq_mask.smq = GENMASK(8, 0);
+ sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_start_all_queues(pfvf->netdev);
+ else
+ netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
+ struct mbox *mbox = &pfvf->mbox;
+ int err, prio;
+
+ mutex_lock(&mbox->lock);
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* A Tx scheduler was created earlier but the user now
+ * wants this priority disabled
+ */
+ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
+ mutex_unlock(&mbox->lock);
+ if (if_up)
+ netif_tx_stop_all_queues(pfvf->netdev);
+
+ otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
+ if (if_up)
+ netif_tx_start_all_queues(pfvf->netdev);
+
+ /* delete the schq */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s failed to stop PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+ mutex_lock(&mbox->lock);
+ goto update_sq_smq_map;
+ }
+
+ /* Skip if the PFC bit is not set or a Tx scheduler is
+ * already mapped for this priority
+ */
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev,
+ "%s failed to allocate PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+update_sq_smq_map:
+ err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping", __func__, prio);
+ return err;
+ }
+ }
+
+ err = otx2_pfc_txschq_config(pfvf);
+ mutex_unlock(&mbox->lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Delete the existing scheduler */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
struct cgx_pfc_cfg *req;
@@ -128,6 +411,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
/* Save PFC configuration to interface */
pfvf->pfc_en = pfc->pfc_en;
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+ goto process_pfc;
+
+ /* Check if the PFC configuration can be
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+ if (err)
+ return err;
+
+process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
return err;
@@ -136,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 3f60a80e34c8..0eb74e8c553d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -76,8 +76,8 @@ static void otx2_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
@@ -963,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -1313,8 +1315,8 @@ static void otx2vf_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *vf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9376d0e62914..5803d7f9137c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -858,6 +858,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+ struct mcs_intr_info *event,
+ struct msg_rsp *rsp)
+{
+ cn10k_handle_mcs_event(pf, event);
+
+ return 0;
+}
+
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@@ -917,6 +926,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
return err; \
}
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
break;
default:
@@ -1389,18 +1399,40 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
goto err_free_sq_ptrs;
}
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+ }
+#endif
+
err = otx2_config_nix_queues(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
+
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl);
+ err = otx2_txschq_config(pf, lvl, 0, false);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_config(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
+#endif
+
mutex_unlock(&mbox->lock);
return err;
@@ -1455,6 +1487,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+#ifdef CONFIG_DCB
+ if (pf->pfc_en)
+ otx2_pfc_txschq_stop(pf);
+#endif
+
mutex_lock(&mbox->lock);
/* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
@@ -1634,8 +1671,7 @@ int otx2_open(struct net_device *netdev)
cq_poll->dev = (void *)pf;
cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
- netif_napi_add(netdev, &cq_poll->napi,
- otx2_napi_handler, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
napi_enable(&cq_poll->napi);
}
@@ -1853,6 +1889,30 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+#ifdef CONFIG_DCB
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u8 vlan_prio;
+
+ if (!skb->vlan_present)
+ goto pick_tx;
+
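+ /* The PCP/priority field is the top three bits of the VLAN TCI */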
+ vlan_prio = skb->vlan_tci >> 13;
+ if ((vlan_prio > pf->hw.tx_queues - 1) ||
+ !pf->pfc_alloc_status[vlan_prio])
+ goto pick_tx;
+
+ return vlan_prio;
+
+pick_tx:
+#endif
+ return netdev_pick_tx(netdev, skb, NULL);
+}
+
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1987,8 +2047,19 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ return -ERANGE;
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ fallthrough;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -2447,6 +2518,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_select_queue = otx2_select_queue,
.ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
@@ -2702,6 +2774,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ err = cn10k_mcs_init(pf);
+ if (err)
+ goto err_del_mcam_entries;
+
if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2916,6 +2992,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_config_pause_frm(pf);
}
+ cn10k_mcs_free(pf);
+
#ifdef CONFIG_DCB
/* Disable PFC config */
if (pf->pfc_en) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index fdc2c9315b91..896b2f9bac34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,33 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
- struct ptp_rsp *rsp;
- int err;
if (!ptp->nic)
- return 0;
+ return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
- return 0;
+ return -ENOMEM;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
- &req->hdr);
- if (IS_ERR(rsp))
- return 0;
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
- return rsp->clk;
+ return otx2_ptp_get_clock(ptp);
}
static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
@@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
+{
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ *tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
- struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(ptp, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
- ptp->last_extts = tstmp;
-
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
+ ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
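+
+/* Periodically cache the PHC time; otx2_set_txtstamp() stamps it into
+ * outgoing one-step Sync packets and hands base_ns to the hardware.
+ */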
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 tstamp;
+
+ mutex_lock(&pfvf->mbox.lock);
+ tstamp = otx2_ptp_get_clock(ptp);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp->tstamp = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ ptp->base_ns = tstamp % NSEC_PER_SEC;
+
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
@@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on)
+ if (on) {
+ ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- else
+ } else {
+ ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
+ }
return 0;
default:
break;
@@ -302,6 +349,8 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
}
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
@@ -316,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index 4bbd12ff26e6..aa205a0d158f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -236,8 +236,15 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_51_16 : 36;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index a18e8efd0f1e..5ec11d71bf60 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -19,6 +19,12 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
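+
+/* 0x13F is 319, the UDP port reserved for PTP event messages */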
+#define PTP_PORT 0x13F
+/* The PTPv2 originTimestamp starts at byte offset 34 and contains a
+ * 6-byte seconds field followed by a 4-byte nanoseconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET 34
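+
+/* struct ptpv2_tstamp is defined elsewhere in this series; the layout
+ * assumed by the code below is presumably:
+ *
+ *	struct ptpv2_tstamp {
+ *		__be16 seconds_msb;
+ *		__be32 seconds_lsb;
+ *		__be32 nanoseconds;
+ *	} __packed;
+ */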
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
@@ -686,7 +692,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -696,6 +703,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
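+
+ /* For one-step PTP these fields tell the hardware which bytes
+ * of the outgoing packet to update at transmit time and whether
+ * a UDP checksum fix-up is needed; see otx2_set_txtstamp().
+ */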
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = udp_csum;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -952,16 +966,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
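+
+/* Recognize PTP over UDP: event messages carry the PTP event port in
+ * both the source and destination UDP ports.
+ */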
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
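+/* Locate the start of the PTP header, accounting for offloaded and
+ * stacked VLAN tags and UDP encapsulation, and report whether the
+ * message is a Sync (messageType 0).
+ */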
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ u8 *data = skb->data, *msgtype;
+ __be16 proto = eth->h_proto;
+ int network_depth = 0;
+
+ /* NIX is programmed to offload the outer VLAN header. With a
+ * single VLAN, the protocol field holds the network header type
+ * (ETH_P_IP/IPV6); with stacked VLANs it holds the inner VLAN
+ * type (0x8100).
+ */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB helpers such as skb_transport_offset() do not
+ * include the offloaded VLAN header length, so add it
+ * explicitly.
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *udp_csum = 1;
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ break;
+ default:
+ return false;
+ }
+
+ msgtype = data + *offset;
+
+ /* The PTP messageType is the low nibble of the first byte; Sync is 0 */
+ return (*msgtype & 0xf) == 0;
+}
+
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
+ struct ptpv2_tstamp *origin_tstamp;
+ int ptp_offset = 0, udp_csum = 0;
+ struct timespec64 ts;
u64 iova;
- if (!skb_shinfo(skb)->gso_size &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (unlikely(!skb_shinfo(skb)->gso_size &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+ if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+ origin_tstamp = (struct ptpv2_tstamp *)
+ ((u8 *)skb->data + ptp_offset +
+ PTP_SYNC_SEC_OFFSET);
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+ origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+ origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+ }
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, pfvf->ptp->base_ns, udp_csum);
} else {
skb_tx_timestamp(skb);
}