author    Johannes Berg <johannes.berg@intel.com>  2023-10-05 22:57:34 +0200
committer Kalle Valo <kvalo@kernel.org>  2023-10-06 17:08:47 +0300
commit    7d6904bf26b96ef087514cb7a8c50b62a4911c99 (patch)
tree      2feaff8495d88c2102dc2e017b7afc5c58db02d6 /drivers/net
parent    9418edf8ff01e7a4904aac1aca4864ecdea37593 (diff)
parent    22061bfc57fe08c77141dc876b4af75603c4d61d (diff)
Merge wireless into wireless-next
Resolve several conflicts, mostly between changes/fixes in wireless and
the locking rework in wireless-next. One of the conflicts actually shows
a bug in wireless that we'll want to fix separately.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/arcnet/arcnet.c | 2
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.c | 11
-rw-r--r-- drivers/net/ethernet/amd/pds_core/dev.c | 9
-rw-r--r-- drivers/net/ethernet/amd/pds_core/devlink.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 55
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 17
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 19
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 24
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_regs.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c | 369
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 240
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 5
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/mae.c | 89
-rw-r--r-- drivers/net/ethernet/sfc/mae.h | 4
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/tc.c | 476
-rw-r--r-- drivers/net/ethernet/sfc/tc.h | 58
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/ti/Makefile | 1
-rw-r--r-- drivers/net/ethernet/ti/icssg/icss_iep.c | 965
-rw-r--r-- drivers/net/ethernet/ti/icssg/icss_iep.h | 41
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_config.c | 7
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 451
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 28
-rw-r--r-- drivers/net/usb/r8152.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 121
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 22
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_rx.c | 16
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d.h | 1
67 files changed, 3134 insertions(+), 328 deletions(-)
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 99265667538c..d9e052c49ba1 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -464,7 +464,7 @@ static void arcnet_reply_tasklet(struct tasklet_struct *t)
ret = sock_queue_err_skb(sk, ackskb);
if (ret)
- kfree_skb(ackskb);
+ dev_kfree_skb_irq(ackskb);
local_irq_enable();
};
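[Note: the swap above is about context. The tasklet frees the skb while interrupts are disabled (note the local_irq_enable() right after), and kfree_skb() can run skb destructors that may not be safe with interrupts off; dev_kfree_skb_irq() defers the actual free to the softirq completion queue. A minimal sketch of the rule of thumb, independent of this driver:

	local_irq_disable();
	/* With IRQs disabled (or in hardirq context), defer the free
	 * instead of freeing inline:
	 */
	dev_kfree_skb_irq(skb);		/* safe here */
	/* kfree_skb(skb);		   may run destructors; avoid here */
	local_irq_enable();
]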
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index f2c79456d745..36f9b932b9e2 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -464,7 +464,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
int i;
- pdsc_devcmd_reset(pdsc);
+ if (!pdsc->pdev->is_virtfn)
+ pdsc_devcmd_reset(pdsc);
pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
pdsc_qcq_free(pdsc, &pdsc->adminqcq);
@@ -524,7 +525,8 @@ static void pdsc_fw_down(struct pdsc *pdsc)
}
/* Notify clients of fw_down */
- devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+ if (pdsc->fw_reporter)
+ devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
pdsc_notify(PDS_EVENT_RESET, &reset_event);
pdsc_stop(pdsc);
@@ -554,8 +556,9 @@ static void pdsc_fw_up(struct pdsc *pdsc)
/* Notify clients of fw_up */
pdsc->fw_recoveries++;
- devlink_health_reporter_state_update(pdsc->fw_reporter,
- DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ if (pdsc->fw_reporter)
+ devlink_health_reporter_state_update(pdsc->fw_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
pdsc_notify(PDS_EVENT_RESET, &reset_event);
return;
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index debe5216fe29..f77cd9f5a2fd 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -121,7 +121,7 @@ static const char *pdsc_devcmd_str(int opcode)
}
}
-static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
+static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
{
struct device *dev = pdsc->dev;
unsigned long start_time;
@@ -131,9 +131,6 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
int done = 0;
int err = 0;
int status;
- int opcode;
-
- opcode = ioread8(&pdsc->cmd_regs->cmd.opcode);
start_time = jiffies;
max_wait = start_time + (max_seconds * HZ);
@@ -180,10 +177,10 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
pdsc_devcmd_dbell(pdsc);
- err = pdsc_devcmd_wait(pdsc, max_seconds);
+ err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
- if (err == -ENXIO || err == -ETIMEDOUT)
+ if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
queue_work(pdsc->wq, &pdsc->health_work);
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 9c6b3653c1c7..d9607033bbf2 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -10,6 +10,9 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
{
int vt;
+ if (!pdsc->viftype_status)
+ return NULL;
+
for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
if (pdsc->viftype_status[vt].dl_id == dl_id)
return &pdsc->viftype_status[vt];
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e388bffda2dd..14b311196b8f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17794,10 +17794,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
tnapi->int_mbox = intmbx;
- if (i <= 4)
- intmbx += 0x8;
- else
- intmbx += 0x4;
+ intmbx += 0x8;
tnapi->consmbox = rcvmbx;
tnapi->prodmbox = sndmbx;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 5a6ffa738396..f818dd215c05 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -131,6 +131,8 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case READ_TIME:
cmd_val |= GLTSYN_CMD_READ_TIME;
break;
+ case ICE_PTP_NOP:
+ break;
}
wr32(hw, GLTSYN_CMD, cmd_val);
@@ -1226,18 +1228,18 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Note there is no equivalent of this operation on E810, as that device
- * always handles all external PHYs internally.
+ * Do not use this function directly. If you want to configure exactly one
+ * port, use ice_ptp_one_port_cmd() instead.
*/
static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1261,6 +1263,8 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
case ADJ_TIME_AT_TIME:
cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
break;
+ case ICE_PTP_NOP:
+ break;
}
/* Tx case */
@@ -1307,6 +1311,39 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
}
/**
+ * ice_ptp_one_port_cmd - Prepare one port for a timer command
+ * @hw: pointer to the HW struct
+ * @configured_port: the port to configure with configured_cmd
+ * @configured_cmd: timer command to prepare on the configured_port
+ *
+ * Prepare the configured_port for the configured_cmd, and prepare all other
+ * ports for ICE_PTP_NOP. This causes the configured_port to execute the
+ * desired command while all other ports perform no operation.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ enum ice_ptp_tmr_cmd cmd;
+ int err;
+
+ if (port == configured_port)
+ cmd = configured_cmd;
+ else
+ cmd = ICE_PTP_NOP;
+
+ err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
@@ -1322,7 +1359,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_one_port_cmd(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
if (err)
return err;
}
@@ -2252,6 +2289,9 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
goto err_unlock;
+ /* Do not perform any action on the main timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
/* Issue the sync to activate the time adjustment */
ice_ptp_exec_tmr_cmd(hw);
@@ -2372,6 +2412,9 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
+ /* Do not perform any action on the main timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
ice_ptp_exec_tmr_cmd(hw);
err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
@@ -2847,6 +2890,8 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case ADJ_TIME_AT_TIME:
cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
break;
+ case ICE_PTP_NOP:
+ return 0;
}
/* Read, modify, write */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 13547707b8e3..9aa10b0426fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -9,7 +9,8 @@ enum ice_ptp_tmr_cmd {
INIT_INCVAL,
ADJ_TIME,
ADJ_TIME_AT_TIME,
- READ_TIME
+ READ_TIME,
+ ICE_PTP_NOP,
};
enum ice_ptp_serdes {
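[Note: adding ICE_PTP_NOP to ice_ptp_tmr_cmd is what lets the new ice_ptp_one_port_cmd() target a single port — every other port is prepared with an explicit no-op — and each switch over the enum gains an empty case so the compiler's -Wswitch exhaustiveness check stays clean. A minimal sketch of the pattern used throughout the hunks above:

	switch (cmd) {
	/* ... real commands ... */
	case READ_TIME:
		cmd_val |= GLTSYN_CMD_READ_TIME;
		break;
	case ICE_PTP_NOP:
		break;	/* explicitly do nothing */
	}
]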
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9f63a10c6f80..1ab787ed254d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4814,6 +4814,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
struct igb_ring *rx_ring)
{
+#if (PAGE_SIZE < 8192)
+ struct e1000_hw *hw = &adapter->hw;
+#endif
+
/* set build_skb and buffer size flags */
clear_ring_build_skb_enabled(rx_ring);
clear_ring_uses_large_buffer(rx_ring);
@@ -4824,10 +4828,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
set_ring_build_skb_enabled(rx_ring);
#if (PAGE_SIZE < 8192)
- if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
- return;
-
- set_ring_uses_large_buffer(rx_ring);
+ if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+ rd32(E1000_RCTL) & E1000_RCTL_SBP)
+ set_ring_uses_large_buffer(rx_ring);
#endif
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 988383e20bb8..e06f77ad6106 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -55,6 +55,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
[LMAC_MODE_50G_R] = "50G_R",
[LMAC_MODE_100G_R] = "100G_R",
[LMAC_MODE_USXGMII] = "USXGMII",
+ [LMAC_MODE_USGMII] = "USGMII",
};
/* CGX PHY management internal APIs */
@@ -231,6 +232,9 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
int index, id;
u64 cfg;
+ if (!lmac)
+ return -ENODEV;
+
/* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
@@ -549,15 +553,16 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx);
- u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ u16 max_dmac;
int index, i;
u64 cfg = 0;
int id;
- if (!cgx)
+ if (!cgx || !lmac)
return;
+ max_dmac = lmac->mac_to_index_bmap.max;
id = get_sequence_id_of_lmac(cgx, lmac_id);
mac_ops = cgx->mac_ops;
@@ -730,7 +735,7 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
int corr_reg, uncorr_reg;
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 574114179688..6f7d1dee5830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -110,6 +110,7 @@ enum LMAC_TYPE {
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_USGMII = 11,
LMAC_MODE_MAX,
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index b4fcb20c3f4f..af21e2030cff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -355,8 +355,8 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
{
+ u64 cfg, pfc_class_mask_cfg;
rpm_t *rpm = rpmd;
- u64 cfg;
/* ALL pause frames received are completely ignored */
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
@@ -380,9 +380,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
/* Disable all PFC classes */
- cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+ cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
- rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+ rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -605,8 +607,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
if (rx_pause) {
@@ -635,10 +640,6 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
- RPMX_CMRX_PRT_CBFC_CTL;
-
rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index b3f766b970ca..f2b1edf1bb43 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -236,6 +236,11 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+ if (!pfmap) {
+ dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
+ event->cgx_id, event->lmac_id);
+ return;
+ }
do {
pfid = find_first_bit(&pfmap,
@@ -345,7 +350,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
- return -ENODEV;
+ return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index dce3cea00032..8511906cb4e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -806,6 +806,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
mutex_unlock(&pfvf->mbox.lock);
}
+EXPORT_SYMBOL(otx2_txschq_free_one);
void otx2_txschq_stop(struct otx2_nic *pfvf)
{
@@ -1434,7 +1435,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
}
pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
- pp_params.pool_size = numptrs;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
pp_params.nid = NUMA_NO_NODE;
pp_params.dev = pfvf->dev;
pp_params.dma_dir = DMA_FROM_DEVICE;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index ccaf97bb1ce0..bfddbff7bcdf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -70,7 +70,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
* link config level. These rest of the scheduler can be
* same as hw.txschq_list.
*/
- for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
req->schq[lvl] = 1;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -83,7 +83,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
- for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
if (!rsp->schq[lvl])
return -ENOSPC;
@@ -125,19 +125,12 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
- struct nix_txsch_free_req *free_req;
+ int lvl;
- mutex_lock(&pfvf->mbox.lock);
/* free PFC TLx nodes */
- free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
- if (!free_req) {
- mutex_unlock(&pfvf->mbox.lock);
- return -ENOMEM;
- }
-
- free_req->flags = TXSCHQ_FREE_ALL;
- otx2_sync_mbox_msg(&pfvf->mbox);
- mutex_unlock(&pfvf->mbox.lock);
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->pfc_schq_list[lvl][prio]);
pfvf->pfc_alloc_status[prio] = false;
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index b5d689eeff80..9e3bfbe5c480 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -23,6 +23,8 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 60
+#define OTX2_PAGE_POOL_SZ 2048
+
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index b244c02c5b51..e24afeaea0da 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -84,7 +84,6 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
}
}
-
static int
wed_txinfo_show(struct seq_file *s, void *data)
{
@@ -127,16 +126,23 @@ wed_txinfo_show(struct seq_file *s, void *data)
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
- DUMP_STR("TX FREE"),
+ DUMP_STR("WED TX FREE"),
DUMP_WED(WED_RX_MIB(0)),
+ DUMP_WED_RING(WED_RING_RX(0)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
+ DUMP_WED(WED_RX_MIB(1)),
+ DUMP_WED_RING(WED_RING_RX(1)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
+
+ DUMP_STR("WED WPDMA TX FREE"),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
- if (!dev)
- return 0;
-
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
@@ -208,10 +214,8 @@ wed_rxinfo_show(struct seq_file *s, void *data)
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
- if (!dev)
- return 0;
-
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 0a50bb98c5ea..47ea69feb3b2 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -266,6 +266,8 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index b56b187a9097..7e94caca4888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3b88a8bb7082..7d4ceb9b9c16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -38,6 +38,7 @@
#include <net/netevent.h>
#include "en.h"
+#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"
@@ -670,6 +671,11 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_xfrm;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto err_xfrm;
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -678,7 +684,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto err_xfrm;
+ goto unblock_ipsec;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -735,6 +741,8 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_ipsec:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
kfree(sa_entry);
NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
@@ -764,6 +772,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto sa_entry_free;
@@ -780,6 +789,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+ mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
kfree(sa_entry);
}
@@ -1055,6 +1065,11 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
pol_entry->x = x;
pol_entry->ipsec = priv->ipsec;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto ipsec_busy;
+ }
+
mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
if (err)
@@ -1064,6 +1079,8 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
return 0;
err_fs:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
+ipsec_busy:
kfree(pol_entry);
NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
@@ -1074,6 +1091,7 @@ static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+ mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index a1cfddd05bc4..7dba4221993f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -254,6 +254,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
if (rx == ipsec->rx_esw) {
mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
} else {
@@ -357,6 +359,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
goto err_add;
/* Create FT */
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
@@ -411,6 +415,8 @@ err_pol_ft:
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
@@ -428,26 +434,19 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (rx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = rx_create(mdev, ipsec, rx, family);
- mlx5_eswitch_block_mode_unlock(mdev, err);
- if (err)
- goto err_out;
+ if (err) {
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
+ }
skip:
rx->ft.refcnt++;
return 0;
-
-err_out:
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
@@ -456,12 +455,8 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
if (--rx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
rx_destroy(ipsec->mdev, ipsec, rx, family);
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
@@ -581,6 +576,8 @@ static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
mlx5_destroy_flow_group(tx->sa.group);
}
mlx5_destroy_flow_table(tx->ft.sa);
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(ipsec->mdev);
mlx5_del_flow_rules(tx->status.rule);
mlx5_destroy_flow_table(tx->ft.status);
}
@@ -621,6 +618,8 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
if (err)
goto err_status_rule;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
@@ -687,6 +686,8 @@ err_pol_ft:
err_sa_miss:
mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
mlx5_destroy_flow_table(tx->ft.status);
@@ -720,32 +721,22 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (tx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = tx_create(ipsec, tx, ipsec->roce);
if (err) {
- mlx5_eswitch_block_mode_unlock(mdev, err);
- goto err_out;
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
}
if (tx == ipsec->tx_esw)
ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
- mlx5_eswitch_block_mode_unlock(mdev, err);
-
skip:
tx->ft.refcnt++;
return 0;
-
-err_out:
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
@@ -753,19 +744,13 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
if (--tx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
-
if (tx == ipsec->tx_esw) {
mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
}
tx_destroy(ipsec, tx, ipsec->roce);
-
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 3c254a710006..d8e739cbcbce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -92,6 +92,12 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+#ifdef CONFIG_XFRM_OFFLOAD
+ .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
+ .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+ .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+ .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
+#endif /* CONFIG_XFRM_OFFLOAD */
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
new file mode 100644
index 000000000000..da10e04777cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
+ *result = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+enum esw_vport_ipsec_offload {
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
+};
+
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ bool ipsec_enabled;
+ int err;
+
+ /* Querying IPsec caps only makes sense when generic ipsec_offload
+ * HCA cap is enabled
+ */
+ err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
+ if (err)
+ return err;
+
+ if (!ipsec_enabled) {
+ vport->info.ipsec_crypto_enabled = false;
+ vport->info.ipsec_packet_enabled = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ vport->info.ipsec_crypto_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+ vport->info.ipsec_packet_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
+ ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int err;
+
+ if (vport->vport == MLX5_VPORT_PF)
+ return -EOPNOTSUPP;
+
+ if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
+ err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+
+ if (enable) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ } else {
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
+ if (err)
+ return err;
+
+ /* The generic ipsec_offload cap can be disabled only if both
+ * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+ */
+ if (!vport->info.ipsec_crypto_enabled &&
+ !vport->info.ipsec_packet_enabled) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+ }
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ vport->info.ipsec_crypto_enabled = enable;
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ vport->info.ipsec_packet_enabled = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
+ if (ret)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
+ ret = -EOPNOTSUPP;
+free:
+ kvfree(query_cap);
+ return ret;
+}
+
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
+{
+ /* Old firmware doesn't support ipsec_offload capability for VFs. This
+ * can be detected by checking reformat_add_esp_trasport capability -
+ * when this cap isn't supported it means firmware cannot be trusted
+ * about what it reports for ipsec_offload cap.
+ */
+ return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ err = esw_ipsec_offload_supported(dev, vport_num);
+ if (err)
+ return err;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
+ goto free;
+
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ ret = esw_ipsec_offload_supported(dev, vport_num);
+ if (ret)
+ return ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+ if (ret)
+ goto out;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ kvfree(query_cap);
+ return ret;
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index db1c2a076364..6cd7d6497e10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -48,6 +48,7 @@
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"
+#include "en_accel/ipsec.h"
enum {
MLX5_ACTION_NONE = 0,
@@ -831,6 +832,8 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
+
+ err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
out_free:
kfree(query_ctx);
return err;
@@ -913,6 +916,9 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count++;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
@@ -969,6 +975,10 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count--;
+
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -2336,3 +2346,34 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);
+
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->enabled_ipsec_vf_count) {
+ mutex_unlock(&esw->state_lock);
+ return false;
+ }
+
+ dev->num_ipsec_offloads++;
+ mutex_unlock(&esw->state_lock);
+ return true;
+}
+
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ /* Failure means no eswitch => core dev is not a PF */
+ return;
+
+ mutex_lock(&esw->state_lock);
+ dev->num_ipsec_offloads--;
+ mutex_unlock(&esw->state_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6fcece69d3be..37ab66e7b403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -163,6 +163,8 @@ struct mlx5_vport_info {
u8 trusted: 1;
u8 roce_enabled: 1;
u8 mig_enabled: 1;
+ u8 ipsec_crypto_enabled: 1;
+ u8 ipsec_packet_enabled: 1;
};
/* Vport context events */
@@ -380,6 +382,7 @@ struct mlx5_eswitch {
struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
+ u16 enabled_ipsec_vf_count;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -558,6 +561,16 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+#endif /* CONFIG_XFRM_OFFLOAD */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -829,10 +842,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err);
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev);
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
@@ -857,6 +868,22 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport);
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -916,13 +943,14 @@ static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; }
-
-static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {}
-
-static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {}
+static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
+static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ return false;
+}
-static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {}
+static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 67eab99f95b1..752fb0dfb111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3641,65 +3641,32 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
return net_eq(devl_net, netdev_net);
}
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev)
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- /* Failure means no eswitch => not possible to change eswitch mode */
- devl_unlock(devlink);
+ if (!mlx5_esw_allowed(esw))
return 0;
- }
+ /* Take TC into account */
err = mlx5_esw_try_lock(esw);
- if (err < 0) {
- devl_unlock(devlink);
+ if (err < 0)
return err;
- }
-
- return 0;
-}
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
- if (!err)
- esw->offloads.num_block_mode++;
+ esw->offloads.num_block_mode++;
mlx5_esw_unlock(esw);
- devl_unlock(devlink);
+ return 0;
}
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev)
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
-}
-
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
esw->offloads.num_block_mode--;
up_write(&esw->mode_lock);
}
@@ -3903,38 +3870,28 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- devl_unlock(devlink);
- /* Failure means no eswitch => not possible to change encap */
+ if (!mlx5_esw_allowed(esw))
return true;
- }
down_write(&esw->mode_lock);
if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return false;
}
esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
@@ -4410,3 +4367,172 @@ mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handl
return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_crypto_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec crypto");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_crypto_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
+ goto unlock;
+ }
+
+ vport->info.ipsec_crypto_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+ bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec packet mode");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_packet_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set IPsec packet mode");
+ goto unlock;
+ }
+
+ vport->info.ipsec_packet_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+#endif /* CONFIG_XFRM_OFFLOAD */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 377372f0578a..aa29f09e8356 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -32,6 +32,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
+#include <linux/log2.h>
#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
@@ -39,10 +40,6 @@
#include "clock.h"
enum {
- MLX5_CYCLES_SHIFT = 31
-};
-
-enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
};
@@ -93,6 +90,31 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
+{
+ /* The optimal shift constant puts the smallest correction step at just 1 scaled ppm.
+ *
+ * Two sets of equations are needed to derive the optimal shift
+ * constant for the cyclecounter.
+ *
+ * dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * Using the two equations together
+ *
+ * dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
+ * dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
+ * dev_freq_khz = 2^(shift_constant - 16)
+ *
+ * then yields
+ *
+ * shift_constant = ilog2(dev_freq_khz) + 16
+ */
+
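+ /* Clamp the shift so that clocksource_khz2mult(), which computes
+ * roughly (NSEC_PER_MSEC << shift) / dev_freq_khz, still fits in a
+ * u32.
+ */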
+ return min(ilog2(dev_freq_khz) + 16,
+ ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
@@ -909,7 +931,7 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
timer->cycles.read = read_internal_timer;
- timer->cycles.shift = MLX5_CYCLES_SHIFT;
+ timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
timer->cycles.mult = clocksource_khz2mult(dev_freq,
timer->cycles.shift);
timer->nominal_c_mult = timer->cycles.mult;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 70735068cf29..0fd290d776ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -405,7 +405,8 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
return sprintf(buf, "front panel %03u\n",
- mlxsw_hwmon_attr->type_index);
+ mlxsw_hwmon_attr->type_index + 1 -
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev->sensor_count);
}
static ssize_t
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 41298835a11e..d23f293e285c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -48,6 +48,7 @@
#define MLXSW_I2C_MBOX_SIZE_BITS 12
#define MLXSW_I2C_ADDR_BUF_SIZE 4
#define MLXSW_I2C_BLK_DEF 32
+#define MLXSW_I2C_BLK_MAX 100
#define MLXSW_I2C_RETRY 5
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
@@ -444,7 +445,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
} else {
/* No input mailbox in case of initialization query command. */
reg_size = MLXSW_I2C_MAX_DATA_SIZE;
- num = reg_size / mlxsw_i2c->block_size;
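+ /* Round up so a trailing partial block is still transferred */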
+ num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
@@ -653,7 +654,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client)
return -EOPNOTSUPP;
}
- mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
+ mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX,
min_t(u16, quirks->max_read_len,
quirks->max_write_len));
} else {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5eb50b265c0b..6351a2dc13bc 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5239,13 +5239,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disable ASPM L1 as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
- * Chips from RTL8168h partially have issues with L1.2, but seem
- * to work fine with L1 and L1.1.
*/
if (rtl_aspm_is_safe(tp))
rc = 0;
- else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
tp->aspm_manageable = !rc;
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 3b8780c76b6e..c3e2b4a21d10 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1219,6 +1219,71 @@ fail:
return rc;
}
+/**
+ * efx_mae_allocate_pedit_mac() - allocate pedit MAC address in HW.
+ * @efx: NIC we're installing a pedit MAC address on
+ * @ped: pedit MAC action to be installed
+ *
+ * Attempts to install @ped in HW and populates its id with an index of this
+ * entry in the firmware MAC address table on success.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN !=
+ sizeof(ped->h_addr));
+ memcpy(MCDI_PTR(inbuf, MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR), ped->h_addr,
+ sizeof(ped->h_addr));
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ ped->fw_id = MCDI_DWORD(outbuf, MAE_MAC_ADDR_ALLOC_OUT_MAC_ID);
+ return 0;
+}
+
+/**
+ * efx_mae_free_pedit_mac() - free pedit MAC address in HW.
+ * @efx: NIC we're installing a pedit MAC address on
+ * @ped: pedit MAC action that needs to be freed
+ *
+ * Frees @ped in HW, checks that firmware did not free a different one, and clears
+ * the id (which denotes the index of the entry in the MAC address table).
+ */
+void efx_mae_free_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_MAC_ADDR_FREE_IN_MAC_ID, ped->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc || outlen < sizeof(outbuf))
+ return;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what MAC addresses exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID) != ped->fw_id))
+ return;
+ /* We're probably about to free @ped, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ ped->fw_id = MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL;
+}
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
@@ -1226,15 +1291,27 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
size_t outlen;
int rc;
- MCDI_POPULATE_DWORD_3(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+ MCDI_POPULATE_DWORD_4(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push,
MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop,
- MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap);
+ MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap,
+ MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL,
+ act->do_ttl_dec);
+
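+ /* MAC_ID_NULL tells the MAE to leave that address unchanged */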
+ if (act->src_mac)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ act->src_mac->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+
+ if (act->dst_mac)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ act->dst_mac->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
- MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
- MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
if (act->count && !WARN_ON(!act->count->cnt))
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
act->count->cnt->fw_id);
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index e88e80574f15..8df30bc4f3ba 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -103,6 +103,10 @@ int efx_mae_update_encap_md(struct efx_nic *efx,
int efx_mae_free_encap_md(struct efx_nic *efx,
struct efx_tc_encap_action *encap);
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped);
+void efx_mae_free_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped);
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3eab1802f6b0..f54200f03e15 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1359,7 +1359,9 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
goto fail;
rc = efx_ptp_insert_eth_multicast_filter(efx);
- if (rc < 0)
+
+ /* Not all firmware variants support this filter */
+ if (rc < 0 && rc != -EPROTONOSUPPORT)
goto fail;
}
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 039180c61c83..047322b04d4f 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -31,6 +31,9 @@ enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev)
return EFX_ENCAP_TYPE_NONE;
}
+#define EFX_TC_HDR_TYPE_TTL_MASK ((u32)0xff)
+/* Hoplimit is stored in the most significant byte in the pedit ipv6 header action */
+#define EFX_TC_HDR_TYPE_HLIMIT_MASK ~((u32)0xff000000)
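+/* Note: TTL_MASK names the TTL field bits (the expected mangle mask is its
+ * complement), whereas HLIMIT_MASK is already the expected mangle mask.
+ */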
#define EFX_EFV_PF NULL
/* Look up the representor information (efv) for a device.
* May return NULL for the PF (us), or an error pointer for a device that
@@ -86,6 +89,12 @@ s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
return mport;
}
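+/* The hash key is the MAC address itself (the leading bytes of the struct) */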
+static const struct rhashtable_params efx_tc_mac_ht_params = {
+ .key_len = offsetofend(struct efx_tc_mac_pedit_action, h_addr),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_mac_pedit_action, linkage),
+};
+
static const struct rhashtable_params efx_tc_encap_match_ht_params = {
.key_len = offsetof(struct efx_tc_encap_match, linkage),
.key_offset = 0,
@@ -110,6 +119,56 @@ static const struct rhashtable_params efx_tc_recirc_ht_params = {
.head_offset = offsetof(struct efx_tc_recirc_id, linkage),
};
+static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx,
+ unsigned char h_addr[ETH_ALEN],
+ struct netlink_ext_ack *extack)
+{
+ struct efx_tc_mac_pedit_action *ped, *old;
+ int rc;
+
+ ped = kzalloc(sizeof(*ped), GFP_USER);
+ if (!ped)
+ return ERR_PTR(-ENOMEM);
+ memcpy(ped->h_addr, h_addr, ETH_ALEN);
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->mac_ht,
+ &ped->linkage,
+ efx_tc_mac_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ped);
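+ /* The old entry is being torn down concurrently (its refcount
+ * hit zero); report -EAGAIN rather than reusing it.
+ */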
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found, ref taken */
+ return old;
+ }
+
+ rc = efx_mae_allocate_pedit_mac(efx, ped);
+ if (rc < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to store pedit MAC address in hw");
+ goto out_remove;
+ }
+
+ /* ref and return */
+ refcount_set(&ped->ref, 1);
+ return ped;
+out_remove:
+ rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage,
+ efx_tc_mac_ht_params);
+ kfree(ped);
+ return ERR_PTR(rc);
+}
+
+static void efx_tc_flower_put_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ if (!refcount_dec_and_test(&ped->ref))
+ return; /* still in use */
+ rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage,
+ efx_tc_mac_ht_params);
+ efx_mae_free_pedit_mac(efx, ped);
+ kfree(ped);
+}
+
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -135,6 +194,10 @@ static void efx_tc_free_action_set(struct efx_nic *efx,
list_del(&act->encap_user);
efx_tc_flower_release_encap_md(efx, act->encap_md);
}
+ if (act->src_mac)
+ efx_tc_flower_put_mac(efx, act->src_mac);
+ if (act->dst_mac)
+ efx_tc_flower_put_mac(efx, act->dst_mac);
kfree(act);
}
@@ -697,6 +760,8 @@ static const char *efx_tc_encap_type_name(enum efx_encap_type typ)
/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
enum efx_tc_action_order {
EFX_TC_AO_DECAP,
+ EFX_TC_AO_DEC_TTL,
+ EFX_TC_AO_PEDIT_MAC_ADDRS,
EFX_TC_AO_VLAN_POP,
EFX_TC_AO_VLAN_PUSH,
EFX_TC_AO_COUNT,
@@ -711,6 +776,15 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
case EFX_TC_AO_DECAP:
if (act->decap)
return false;
+ /* PEDIT_MAC_ADDRS must not happen before DECAP, though it
+ * can wait until much later
+ */
+ if (act->dst_mac || act->src_mac)
+ return false;
+
+ /* Decrementing ttl must not happen before DECAP */
+ if (act->do_ttl_dec)
+ return false;
fallthrough;
case EFX_TC_AO_VLAN_POP:
if (act->vlan_pop >= 2)
@@ -730,12 +804,17 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
if (act->count)
return false;
fallthrough;
+ case EFX_TC_AO_PEDIT_MAC_ADDRS:
case EFX_TC_AO_ENCAP:
if (act->encap_md)
return false;
fallthrough;
case EFX_TC_AO_DELIVER:
return !act->deliver;
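+ /* TTL decrement acts on the packet before any encap header is
+ * pushed, so it must not follow an encap action, and it can only
+ * be applied once.
+ */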
+ case EFX_TC_AO_DEC_TTL:
+ if (act->encap_md)
+ return false;
+ return !act->do_ttl_dec;
default:
/* Bad caller. Whatever they wanted to do, say they can't. */
WARN_ON_ONCE(1);
@@ -900,6 +979,375 @@ static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx,
efx_tc_flower_put_counter_index(efx, act->count);
}
+/**
+ * struct efx_tc_mangler_state - accumulates 32-bit pedits into fields
+ *
+ * @dst_mac_32: dst_mac[0:3] has been populated
+ * @dst_mac_16: dst_mac[4:5] has been populated
+ * @src_mac_16: src_mac[0:1] has been populated
+ * @src_mac_32: src_mac[2:5] has been populated
+ * @dst_mac: h_dest field of ethhdr
+ * @src_mac: h_source field of ethhdr
+ *
+ * Since FLOW_ACTION_MANGLE comes in 32-bit chunks that do not
+ * necessarily equate to whole fields of the packet header, this
+ * structure is used to hold the cumulative effect of the partial
+ * field pedits that have been processed so far.
+ */
+struct efx_tc_mangler_state {
+ u8 dst_mac_32:1; /* eth->h_dest[0:3] */
+ u8 dst_mac_16:1; /* eth->h_dest[4:5] */
+ u8 src_mac_16:1; /* eth->h_source[0:1] */
+ u8 src_mac_32:1; /* eth->h_source[2:5] */
+ unsigned char dst_mac[ETH_ALEN];
+ unsigned char src_mac[ETH_ALEN];
+};
+
+/**
+ * efx_tc_complete_mac_mangle() - pull complete field pedits out of @mung
+ * @efx: NIC we're installing a flow rule on
+ * @act: action set (cursor) to update
+ * @mung: accumulated partial mangles
+ * @extack: netlink extended ack for reporting errors
+ *
+ * Check @mung to find any combinations of partial mangles that can be
+ * combined into a complete packet field edit, add that edit to @act,
+ * and consume the partial mangles from @mung.
+ */
+static int efx_tc_complete_mac_mangle(struct efx_nic *efx,
+ struct efx_tc_action_set *act,
+ struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_tc_mac_pedit_action *ped;
+
+ if (mung->dst_mac_32 && mung->dst_mac_16) {
+ ped = efx_tc_flower_get_mac(efx, mung->dst_mac, extack);
+ if (IS_ERR(ped))
+ return PTR_ERR(ped);
+
+ /* Check that we have not already populated dst_mac */
+ if (act->dst_mac)
+ efx_tc_flower_put_mac(efx, act->dst_mac);
+
+ act->dst_mac = ped;
+
+ /* consume the incomplete state */
+ mung->dst_mac_32 = 0;
+ mung->dst_mac_16 = 0;
+ }
+ if (mung->src_mac_16 && mung->src_mac_32) {
+ ped = efx_tc_flower_get_mac(efx, mung->src_mac, extack);
+ if (IS_ERR(ped))
+ return PTR_ERR(ped);
+
+ /* Check that we have not already populated src_mac */
+ if (act->src_mac)
+ efx_tc_flower_put_mac(efx, act->src_mac);
+
+ act->src_mac = ped;
+
+ /* consume the incomplete state */
+ mung->src_mac_32 = 0;
+ mung->src_mac_16 = 0;
+ }
+ return 0;
+}
+
+static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
+ const struct flow_action_entry *fa,
+ struct netlink_ext_ack *extack)
+{
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, ttl):
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK)
+ break;
+
+ /* Adding 0xff is equivalent to decrementing the ttl.
+ * Other added values are not supported.
+ */
+ if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) != U8_MAX)
+ break;
+
+ /* check that we do not decrement ttl twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+ act->do_ttl_dec = 1;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (fa->mangle.offset) {
+ case round_down(offsetof(struct ipv6hdr, hop_limit), 4):
+ /* check that pedit applies to hoplimit only */
+ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK)
+ break;
+
+ /* Adding 0xff is equivalent to decrementing the hoplimit.
+ * Other added values are not supported.
+ */
+ if ((fa->mangle.val >> 24) != U8_MAX)
+ break;
+
+ /* check that we do not decrement hoplimit twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+ act->do_ttl_dec = 1;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: ttl add action type %x %x %x/%x",
+ fa->mangle.htype, fa->mangle.offset,
+ fa->mangle.val, fa->mangle.mask);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * efx_tc_mangle() - handle a single 32-bit (or less) pedit
+ * @efx: NIC we're installing a flow rule on
+ * @act: action set (cursor) to update
+ * @fa: FLOW_ACTION_MANGLE action metadata
+ * @mung: accumulator for partial mangles
+ * @extack: netlink extended ack for reporting errors
+ * @match: original match used along with the mangle action
+ *
+ * Identify the fields written by a FLOW_ACTION_MANGLE, and record
+ * the partial mangle state in @mung. If this mangle completes an
+ * earlier partial mangle, consume and apply to @act by calling
+ * efx_tc_complete_mac_mangle().
+ */
+static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
+ const struct flow_action_entry *fa,
+ struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack,
+ struct efx_tc_match *match)
+{
+ __le32 mac32;
+ __le16 mac16;
+ u8 tr_ttl;
+
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ BUILD_BUG_ON(offsetof(struct ethhdr, h_dest) != 0);
+ BUILD_BUG_ON(offsetof(struct ethhdr, h_source) != 6);
+ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_PEDIT_MAC_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Pedit mangle mac action violates action order");
+ return -EOPNOTSUPP;
+ }
+ switch (fa->mangle.offset) {
+ case 0:
+ if (fa->mangle.mask) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth.dst32 mangle",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ /* Ethernet address is little-endian */
+ mac32 = cpu_to_le32(fa->mangle.val);
+ memcpy(mung->dst_mac, &mac32, sizeof(mac32));
+ mung->dst_mac_32 = 1;
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ case 4:
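+ /* Bytes 4-7 of the frame straddle h_dest[4:5] and h_source[0:1].
+ * Mangle mask bits are preserved, and the 32-bit word maps to
+ * packet bytes in little-endian order, so mask 0xffff writes
+ * h_source[0:1] and mask 0xffff0000 writes h_dest[4:5].
+ */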
+ if (fa->mangle.mask == 0xffff) {
+ mac16 = cpu_to_le16(fa->mangle.val >> 16);
+ memcpy(mung->src_mac, &mac16, sizeof(mac16));
+ mung->src_mac_16 = 1;
+ } else if (fa->mangle.mask == 0xffff0000) {
+ mac16 = cpu_to_le16((u16)fa->mangle.val);
+ memcpy(mung->dst_mac + 4, &mac16, sizeof(mac16));
+ mung->dst_mac_16 = 1;
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ case 8:
+ if (fa->mangle.mask) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth.src32 mangle",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ mac32 = cpu_to_le32(fa->mangle.val);
+ memcpy(mung->src_mac + 2, &mac32, sizeof(mac32));
+ mung->src_mac_32 = 1;
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+ fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, ttl):
+ /* we currently only support pedit IP4 when it applies
+ * to TTL and then only when it can be achieved with a
+ * decrement ttl action
+ */
+
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+
+ /* we can only convert to a dec ttl when we have an
+ * exact match on the ttl field
+ */
+ if (match->mask.ip_ttl != U8_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+ match->mask.ip_ttl);
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we don't try to decrement 0, which equates
+ * to setting the ttl to 0xff
+ */
+ if (match->value.ip_ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: we cannot decrement ttl past 0");
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we do not decrement ttl twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+
+ /* check pedit can be achieved with decrement action */
+ tr_ttl = match->value.ip_ttl - 1;
+ if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) == tr_ttl) {
+ act->do_ttl_dec = 1;
+ return 0;
+ }
+
+ fallthrough;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle on the ttl field (offset is %u)",
+ fa->mangle.offset);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (fa->mangle.offset) {
+ case round_down(offsetof(struct ipv6hdr, hop_limit), 4):
+ /* we currently only support pedit IP6 when it applies
+ * to the hoplimit and then only when it can be achieved
+ * with a decrement hoplimit action
+ */
+
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+ fa->mangle.mask);
+
+ return -EOPNOTSUPP;
+ }
+
+ /* we can only convert to a dec ttl when we have an
+ * exact match on the ttl field
+ */
+ if (match->mask.ip_ttl != U8_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+ match->mask.ip_ttl);
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we don't try to decrement 0, which equates
+ * to setting the ttl to 0xff
+ */
+ if (match->value.ip_ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: we cannot decrement hop_limit past 0");
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we do not decrement hoplimit twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+
+ /* check pedit can be achieved with decrement action */
+ tr_ttl = match->value.ip_ttl - 1;
+ if ((fa->mangle.val >> 24) == tr_ttl) {
+ act->do_ttl_dec = 1;
+ return 0;
+ }
+
+ fallthrough;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: only support mangle on the hop_limit field");
+ return -EOPNOTSUPP;
+ }
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled mangle htype %u for action rule",
+ fa->mangle.htype);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/**
+ * efx_tc_incomplete_mangle() - check for leftover partial pedits
+ * @mung: accumulator for partial mangles
+ * @extack: netlink extended ack for reporting errors
+ *
+ * Since the MAE can only overwrite whole fields, any partial
+ * field mangle left over on reaching packet delivery (mirred or
+ * end of TC actions) cannot be offloaded. Check for any such
+ * and reject them with -%EOPNOTSUPP.
+ */
+static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack)
+{
+ if (mung->dst_mac_32 || mung->dst_mac_16) {
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of destination MAC address");
+ return -EOPNOTSUPP;
+ }
+ if (mung->src_mac_16 || mung->src_mac_32) {
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of source MAC address");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
@@ -1295,6 +1743,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
struct netlink_ext_ack *extack = tc->common.extack;
const struct ip_tunnel_info *encap_info = NULL;
struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_mangler_state mung = {};
struct efx_tc_action_set *act = NULL;
const struct flow_action_entry *fa;
struct efx_rep *from_efv, *to_efv;
@@ -1631,6 +2080,16 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
act->vlan_proto[act->vlan_push] = fa->vlan.proto;
act->vlan_push++;
break;
+ case FLOW_ACTION_ADD:
+ rc = efx_tc_pedit_add(efx, act, fa, extack);
+ if (rc < 0)
+ goto release;
+ break;
+ case FLOW_ACTION_MANGLE:
+ rc = efx_tc_mangle(efx, act, fa, &mung, extack, &match);
+ if (rc < 0)
+ goto release;
+ break;
case FLOW_ACTION_TUNNEL_ENCAP:
if (encap_info) {
/* Can't specify encap multiple times.
@@ -1670,6 +2129,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
}
}
+ rc = efx_tc_incomplete_mangle(&mung, extack);
+ if (rc < 0)
+ goto release;
if (act) {
/* Not shot/redirected, so deliver to default dest */
if (from_efv == EFX_EFV_PF)
@@ -2156,6 +2618,14 @@ static void efx_tc_lhs_free(void *ptr, void *arg)
kfree(rule);
}
+static void efx_tc_mac_free(void *ptr, void *__unused)
+{
+ struct efx_tc_mac_pedit_action *ped = ptr;
+
+ WARN_ON(refcount_read(&ped->ref));
+ kfree(ped);
+}
+
static void efx_tc_flow_free(void *ptr, void *arg)
{
struct efx_tc_flow_rule *rule = ptr;
@@ -2196,6 +2666,9 @@ int efx_init_struct_tc(struct efx_nic *efx)
rc = efx_tc_init_counters(efx);
if (rc < 0)
goto fail_counters;
+ rc = rhashtable_init(&efx->tc->mac_ht, &efx_tc_mac_ht_params);
+ if (rc < 0)
+ goto fail_mac_ht;
rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params);
if (rc < 0)
goto fail_encap_match_ht;
@@ -2233,6 +2706,8 @@ fail_lhs_rule_ht:
fail_match_action_ht:
rhashtable_destroy(&efx->tc->encap_match_ht);
fail_encap_match_ht:
+ rhashtable_destroy(&efx->tc->mac_ht);
+fail_mac_ht:
efx_tc_destroy_counters(efx);
fail_counters:
efx_tc_destroy_encap_actions(efx);
@@ -2268,6 +2743,7 @@ void efx_fini_struct_tc(struct efx_nic *efx)
rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx);
WARN_ON(!ida_is_empty(&efx->tc->recirc_ida));
ida_destroy(&efx->tc->recirc_ida);
+ rhashtable_free_and_destroy(&efx->tc->mac_ht, efx_tc_mac_free, NULL);
efx_tc_fini_counters(efx);
efx_tc_fini_encap_actions(efx);
mutex_unlock(&efx->tc->mutex);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 40d2c803fca8..4dd2c378fd9f 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -18,6 +18,23 @@
#define IS_ALL_ONES(v) (!(typeof (v))~(v))
+/**
+ * struct efx_tc_mac_pedit_action - mac pedit action fields
+ *
+ * @h_addr: mac address field of ethernet header
+ * @linkage: rhashtable reference
+ * @ref: reference count
+ * @fw_id: index of this entry in firmware MAC address table
+ *
+ * MAC address edits are indirected through a table in the hardware
+ */
+struct efx_tc_mac_pedit_action {
+ u8 h_addr[ETH_ALEN];
+ struct rhash_head linkage;
+ refcount_t ref;
+ u32 fw_id; /* index of this entry in firmware MAC address table */
+};
+
static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
{
return !memchr_inv(addr, 0xff, sizeof(*addr));
@@ -25,20 +42,45 @@ static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
struct efx_tc_encap_action; /* see tc_encap_actions.h */
+/**
+ * struct efx_tc_action_set - collection of tc action fields
+ *
+ * @vlan_push: the number of vlan headers to push
+ * @vlan_pop: the number of vlan headers to pop
+ * @decap: used to indicate a tunnel header decapsulation should take place
+ * @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented
+ * @deliver: used to indicate a deliver action should take place
+ * @vlan_tci: tci fields for vlan push actions
+ * @vlan_proto: ethernet types for vlan push actions
+ * @count: counter mapping
+ * @encap_md: encap entry in tc_encap_ht table
+ * @encap_user: linked list of encap users (encap_md->users)
+ * @user: owning action-set-list. Only populated if @encap_md is; used by efx_tc_update_encap() fallback handling
+ * @count_user: linked list of counter users (counter->users)
+ * @dest_mport: destination mport
+ * @src_mac: source mac entry in tc_mac_ht table
+ * @dst_mac: destination mac entry in tc_mac_ht table
+ * @fw_id: index of this entry in firmware actions table
+ * @list: linked list of tc actions
+ *
+ */
struct efx_tc_action_set {
u16 vlan_push:2;
u16 vlan_pop:2;
u16 decap:1;
+ u16 do_ttl_dec:1;
u16 deliver:1;
- __be16 vlan_tci[2]; /* TCIs for vlan_push */
- __be16 vlan_proto[2]; /* Ethertypes for vlan_push */
+ __be16 vlan_tci[2];
+ __be16 vlan_proto[2];
struct efx_tc_counter_index *count;
- struct efx_tc_encap_action *encap_md; /* entry in tc_encap_ht table */
- struct list_head encap_user; /* entry on encap_md->users list */
- struct efx_tc_action_set_list *user; /* Only populated if encap_md */
- struct list_head count_user; /* entry on counter->users list, if encap */
+ struct efx_tc_encap_action *encap_md;
+ struct list_head encap_user;
+ struct efx_tc_action_set_list *user;
+ struct list_head count_user;
u32 dest_mport;
- u32 fw_id; /* index of this entry in firmware actions table */
+ struct efx_tc_mac_pedit_action *src_mac;
+ struct efx_tc_mac_pedit_action *dst_mac;
+ u32 fw_id;
struct list_head list;
};
@@ -220,6 +262,7 @@ struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */
* @counter_ht: Hashtable of TC counters (FW IDs and counter values)
* @counter_id_ht: Hashtable mapping TC counter cookies to counters
* @encap_ht: Hashtable of TC encap actions
+ * @mac_ht: Hashtable of MAC address entries (for pedits)
* @encap_match_ht: Hashtable of TC encap matches
* @match_action_ht: Hashtable of TC match-action rules
* @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules
@@ -257,6 +300,7 @@ struct efx_tc_state {
struct rhashtable counter_ht;
struct rhashtable counter_id_ht;
struct rhashtable encap_ht;
+ struct rhashtable mac_ht;
struct rhashtable encap_match_ht;
struct rhashtable match_action_ht;
struct rhashtable lhs_rule_ht;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 535856fffaea..df34e34cc14f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -70,7 +70,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -87,7 +87,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
@@ -110,7 +110,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -125,7 +125,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
@@ -192,8 +192,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
switch (speed) {
@@ -260,7 +260,7 @@ static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index e22ef0d6bc73..0a20c3d24722 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -89,7 +89,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
@@ -118,7 +118,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -130,13 +130,13 @@ static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct ingenic_mac *mac = plat_dat->bsp_priv;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -149,14 +149,14 @@ static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -169,7 +169,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
@@ -177,7 +177,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -190,7 +190,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
@@ -220,7 +220,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index a25c187d3185..2cd6fce5c993 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -117,7 +117,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
}
plat->phy_interface = phy_mode;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->mac_interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 18e84ba693a6..d0aa674ce705 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -50,9 +50,9 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 7580077383c0..cd796ec04132 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -587,7 +587,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
- plat->interface = priv_plat->phy_mode;
+ plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 7db176e8691f..9bf102bbc6a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -236,7 +236,7 @@ static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
struct net_device *ndev = dev_get_drvdata(dwmac->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- return priv->plat->interface;
+ return priv->plat->mac_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 892612564694..9289bb87c3e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -60,7 +60,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
unsigned int mode;
int err;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
mode = STARFIVE_DWMAC_PHY_INFT_RMII;
break;
@@ -72,7 +72,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
default:
dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 3a09085819dc..26ea8c687881 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -171,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
clk_rate = clk_get_rate(dwmac->clk_eth_ck);
dwmac->enable_eth_ck = false;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
dwmac->enable_eth_ck = true;
@@ -210,7 +210,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
/* Do not manage other interfaces */
return -EINVAL;
}
@@ -230,7 +230,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
@@ -241,7 +241,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
/* Do not manage other interfaces */
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index c23420863a8d..01e77368eef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1016,7 +1016,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (plat->interface) {
+ switch (plat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1031,7 +1031,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->interface));
+ phy_modes(plat->mac_interface));
return -EINVAL;
}
@@ -1231,7 +1231,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->interface = interface;
+ plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 33ca5c50bdcd..9a3182b9e767 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1119,7 +1119,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->interface;
+ int interface = priv->plat->mac_interface;
if (priv->dma_cap.pcs) {
if ((interface == PHY_INTERFACE_MODE_RGMII) ||
@@ -1214,7 +1214,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
- /* Set the platform/firmware specified interface mode */
+ /* Set the platform/firmware specified interface mode. Note, phylink
+ * deals with the PHY interface mode, not the MAC interface mode.
+ */
__set_bit(mode, priv->phylink_config.supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ff330423ee66..35f4b1484029 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -419,9 +419,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
- plat->interface = stmmac_of_get_mac_mode(np);
- if (plat->interface < 0)
- plat->interface = plat->phy_interface;
+ plat->mac_interface = stmmac_of_get_mac_mode(np);
+ if (plat->mac_interface < 0)
+ plat->mac_interface = plat->phy_interface;
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 63e510b6860f..88b5b1b47779 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -186,6 +186,7 @@ config CPMAC
config TI_ICSSG_PRUETH
tristate "TI Gigabit PRU Ethernet driver"
select PHYLIB
+ select TI_ICSS_IEP
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
help
@@ -196,4 +197,15 @@ config TI_ICSSG_PRUETH
to support the Ethernet operation. Currently, it supports Ethernet
with 1G and 100M link speed.
+config TI_ICSS_IEP
+ tristate "TI PRU ICSS IEP driver"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ This driver enables support for the PRU-ICSS Industrial Ethernet
+ Peripheral within a PRU-ICSS subsystem present on various TI SoCs.
+
+ To compile this driver as a module, choose M here. The module
+ will be called icss_iep.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 9176d79c36e1..34fd7a716ba6 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -38,3 +38,4 @@ icssg-prueth-y := k3-cppi-desc-pool.o \
icssg/icssg_mii_cfg.o \
icssg/icssg_stats.o \
icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
new file mode 100644
index 000000000000..4cf2a52e4378
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/timekeeping.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "icss_iep.h"
+
+#define IEP_MAX_DEF_INC 0xf
+#define IEP_MAX_COMPEN_INC 0xfff
+#define IEP_MAX_COMPEN_COUNT 0xffffff
+
+#define IEP_GLOBAL_CFG_CNT_ENABLE BIT(0)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK GENMASK(7, 4)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT 4
+#define IEP_GLOBAL_CFG_COMPEN_INC_MASK GENMASK(19, 8)
+#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT 8
+
+#define IEP_GLOBAL_STATUS_CNT_OVF BIT(0)
+
+#define IEP_CMP_CFG_SHADOW_EN BIT(17)
+#define IEP_CMP_CFG_CMP0_RST_CNT_EN BIT(0)
+#define IEP_CMP_CFG_CMP_EN(cmp) (GENMASK(16, 1) & (1 << ((cmp) + 1)))
+
+#define IEP_CMP_STATUS(cmp) (1 << (cmp))
+
+#define IEP_SYNC_CTRL_SYNC_EN BIT(0)
+#define IEP_SYNC_CTRL_SYNC_N_EN(n) (GENMASK(2, 1) & (BIT(1) << (n)))
+
+#define IEP_MIN_CMP 0
+#define IEP_MAX_CMP 15
+
+#define ICSS_IEP_64BIT_COUNTER_SUPPORT BIT(0)
+#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT BIT(1)
+#define ICSS_IEP_SHADOW_MODE_SUPPORT BIT(2)
+
+#define LATCH_INDEX(ts_index) ((ts_index) + 6)
+#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
+#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
+
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+};
+
+/**
+ * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: upper 32 bit IEP counter
+ */
+int icss_iep_get_count_hi(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
+ val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
+
+/**
+ * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: lower 32 bit IEP counter
+ */
+int icss_iep_get_count_low(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep)
+ val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
+
+/**
+ * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: PTP clock index, -1 if not registered
+ */
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
+{
+ if (!iep || !iep->ptp_clock)
+ return -1;
+ return ptp_clock_index(iep->ptp_clock);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
+
+static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
+{
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns), iep->base +
+ iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+
+/**
+ * icss_iep_settime() - Set time of the PTP clock using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ * @ns: Time to be set in nanoseconds
+ *
+ * This API uses writel() instead of regmap_write() for write operations as
+ * regmap_write() is too slow and this API is time sensitive.
+ */
+static void icss_iep_settime(struct icss_iep *iep, u64 ns)
+{
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->settime) {
+ iep->ops->settime(iep->clockops_data, ns);
+ return;
+ }
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ if (iep->pps_enabled || iep->perout_enabled)
+ writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
+
+ icss_iep_set_counter(iep, ns);
+
+ if (iep->pps_enabled || iep->perout_enabled) {
+ icss_iep_update_to_next_boundary(iep, ns);
+ writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
+ iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
+ }
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+}
+
+/**
+ * icss_iep_gettime() - Get time of the PTP clock using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ * @sts: Pointer to structure representing PTP system timestamp.
+ *
+ * This API uses readl() instead of regmap_read() for read operations as
+ * regmap_read() is too slow and this API is time sensitive.
+ *
+ * Return: The current timestamp of the PTP clock using IEP driver
+ */
+static u64 icss_iep_gettime(struct icss_iep *iep,
+ struct ptp_system_timestamp *sts)
+{
+ u32 ts_hi = 0, ts_lo;
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->gettime)
+ return iep->ops->gettime(iep->clockops_data, sts);
+
+ /* use local_irq_x() to make it work for both RT/non-RT */
+ local_irq_save(flags);
+
+ /* no need to play with hi-lo, hi is latched when lo is read */
+ ptp_read_system_prets(sts);
+ ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+ ptp_read_system_postts(sts);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+
+ local_irq_restore(flags);
+
+ return (u64)ts_lo | (u64)ts_hi << 32;
+}
+
+static void icss_iep_enable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ IEP_GLOBAL_CFG_CNT_ENABLE);
+}
+
+static void icss_iep_disable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ 0);
+}
+
+static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+{
+ u32 cycle_time;
+ int cmp;
+
+ cycle_time = iep->cycle_time_ns - iep->def_inc;
+
+ icss_iep_disable(iep);
+
+ /* disable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, 0);
+
+ /* enable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);
+
+ /* clear counters */
+ icss_iep_set_counter(iep, 0);
+
+ /* clear overflow status */
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
+ IEP_GLOBAL_STATUS_CNT_OVF,
+ IEP_GLOBAL_STATUS_CNT_OVF);
+
+ /* clear compare status */
+ for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+ }
+
+ /* enable reset counter on CMP0 event */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN);
+ /* enable compare */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(0),
+ IEP_CMP_CFG_CMP_EN(0));
+
+ /* set CMP0 value to cycle time */
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);
+
+ icss_iep_set_counter(iep, 0);
+ icss_iep_enable(iep);
+}
+
+static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
+ def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_inc > IEP_MAX_COMPEN_INC) {
+ dev_err(dev, "%s: too high compensation inc %d\n",
+ __func__, compen_inc);
+ compen_inc = IEP_MAX_COMPEN_INC;
+ }
+
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_COMPEN_INC_MASK,
+ compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_count > IEP_MAX_COMPEN_COUNT) {
+ dev_err(dev, "%s: too high compensation count %d\n",
+ __func__, compen_count);
+ compen_count = IEP_MAX_COMPEN_COUNT;
+ }
+
+ regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
+}
+
+static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
+}
+
+/* PTP PHC operations */
+static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ u32 cyc_count;
+ u16 cmp_inc;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ /* ppb is the amount of frequency adjustment we want, in parts per
+ * billion relative to 1 GHz: e.g. 100 ppb means we need to speed the
+ * clock up by 100 Hz, i.e. at the end of 1 second (1 billion ns) of
+ * clock time we should have counted 100 more ns.
+ * We use IEP slow compensation to achieve continuous freq. adjustment.
+ * There are 2 parts: cycle time and adjustment per cycle.
+ * The simplest case would be a 1 sec cycle time; the adjustment per
+ * cycle would then be (def_inc + ppb).
+ * The cycle time has to be chosen based on the magnitude of the ppb:
+ * the smaller the ppb, the larger the cycle time has to be.
+ * The minimum adjustment we can do is +-1ns per cycle, so let's
+ * reduce the cycle time to get a 1ns-per-cycle adjustment.
+ * 1 ppb = 1 sec cycle time & 1 ns adjust
+ * 1000 ppb = 1/1000 sec cycle time & 1 ns adjust per cycle
+ */
+
+ if (iep->cycle_time_ns)
+ iep->slow_cmp_inc = iep->clk_tick_time; /* 4ns adj per cycle */
+ else
+ iep->slow_cmp_inc = 1; /* 1ns adjust per cycle */
+
+ if (ppb < 0) {
+ iep->slow_cmp_inc = -iep->slow_cmp_inc;
+ ppb = -ppb;
+ }
+
+ cyc_count = NSEC_PER_SEC; /* 1s cycle time @1GHz */
+ cyc_count /= ppb; /* cycle time per ppb */
+
+ /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
+ if (!iep->cycle_time_ns)
+ cyc_count /= iep->clk_tick_time;
+ iep->slow_cmp_count = cyc_count;
+
+ /* iep->clk_tick_time is def_inc */
+ cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
+ icss_iep_set_compensation_inc(iep, cmp_inc);
+ icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
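The arithmetic above is easier to follow with concrete numbers. Below is a minimal user-space sketch of the same slow-compensation math, assuming a 250 MHz IEP clock (def_inc = 4 ns) running in 64-bit mode (cycle_time_ns == 0); the figures are illustrative rather than taken from hardware:

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000u

	int main(void)
	{
		unsigned int def_inc = 4;	/* ns per IEP tick @ 250 MHz */
		int ppb = 100;			/* requested adjustment */
		int slow_cmp_inc = 1;		/* +-1 ns per compensation cycle */
		unsigned int cyc_count;

		if (ppb < 0) {
			slow_cmp_inc = -slow_cmp_inc;
			ppb = -ppb;
		}

		cyc_count = NSEC_PER_SEC / ppb;	/* ns between 1 ns adjustments */
		cyc_count /= def_inc;		/* ...expressed in clock ticks */

		/* 100 ppb -> a 5 ns tick (instead of 4) once every 2500000
		 * ticks, i.e. +1 ns per 10 ms = +100 ns per second
		 */
		printf("COMPEN_INC=%u, SLOW_COMPEN=%u ticks\n",
		       def_inc + slow_cmp_inc, cyc_count);
		return 0;
	}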
+static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ if (iep->ops && iep->ops->adjtime) {
+ iep->ops->adjtime(iep->clockops_data, delta);
+ } else {
+ ns = icss_iep_gettime(iep, NULL);
+ ns += delta;
+ icss_iep_settime(iep, ns);
+ }
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = icss_iep_gettime(iep, sts);
+ *ts = ns_to_timespec64(ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = timespec64_to_ns(ts);
+ icss_iep_settime(iep, ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
+{
+ u64 ns, p_ns;
+ u32 offset;
+
+ ns = icss_iep_gettime(iep, NULL);
+ if (start_ns < ns)
+ start_ns = ns;
+ p_ns = iep->period;
+ /* Round up to next period boundary */
+ start_ns += p_ns - 1;
+ offset = do_div(start_ns, p_ns);
+ start_ns = start_ns * p_ns;
+ /* If it is too close to the boundary to update, shift to the next one */
+ if (p_ns - offset < 10)
+ start_ns += p_ns;
+
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
+}
+
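A standalone sketch of the boundary rounding just performed, with a plain-C stand-in for the kernel's do_div() (quotient left in place, remainder returned); the start time and the 1 ms period are made-up values:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
	{
		uint32_t rem = (uint32_t)(*n % base);

		*n /= base;
		return rem;
	}

	int main(void)
	{
		uint64_t start_ns = 1000000407ULL;	/* requested start */
		uint64_t p_ns = 1000000ULL;		/* 1 ms period */
		uint32_t offset;

		start_ns += p_ns - 1;			/* round up */
		offset = do_div_sketch(&start_ns, (uint32_t)p_ns);
		start_ns *= p_ns;
		if (p_ns - offset < 10)			/* too close, skip ahead */
			start_ns += p_ns;

		printf("next CMP1 boundary: %llu\n",	/* -> 1001000000 */
		       (unsigned long long)start_ns);
		return 0;
	}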
+static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ int ret;
+ u64 cmp;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
+ if (ret)
+ return ret;
+
+ if (on) {
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC, 1ms pulse width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+ }
+ } else {
+ if (on) {
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int icss_iep_perout_enable(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_pps_enable(struct icss_iep *iep, int on)
+{
+ struct ptp_clock_request rq;
+ struct timespec64 ts;
+ unsigned long flags;
+ int ret = 0;
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->perout_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->pps_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+
+ rq.perout.index = 0;
+ if (on) {
+ ns = icss_iep_gettime(iep, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ } else {
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ }
+
+ if (!ret)
+ iep->pps_enabled = !!on;
+
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
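On the user side this PPS path is normally reached through the PTP character device. A hedged sketch, where /dev/ptp0 stands in for whatever node this clock registers as (requires sufficient privileges):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, PTP_ENABLE_PPS, 1))	/* feed the kernel PPS subsystem */
			perror("PTP_ENABLE_PPS");
		close(fd);
		return 0;
	}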
+static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
+{
+ u32 val, cap;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->ops && iep->ops->extts_enable) {
+ ret = iep->ops->extts_enable(iep->clockops_data, index, on);
+ goto exit;
+ }
+
+ if (((iep->latch_enable & BIT(index)) >> index) == on)
+ goto exit;
+
+ regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
+ cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
+ if (on) {
+ val |= cap;
+ iep->latch_enable |= BIT(index);
+ } else {
+ val &= ~cap;
+ iep->latch_enable &= ~BIT(index);
+ }
+ regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return icss_iep_perout_enable(iep, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return icss_iep_pps_enable(iep, on);
+ case PTP_CLK_REQ_EXTTS:
+ return icss_iep_extts_enable(iep, rq->extts.index, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info icss_iep_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ICSS IEP timer",
+ .max_adj = 10000000,
+ .adjfine = icss_iep_ptp_adjfine,
+ .adjtime = icss_iep_ptp_adjtime,
+ .gettimex64 = icss_iep_ptp_gettimeex,
+ .settime64 = icss_iep_ptp_settime,
+ .enable = icss_iep_ptp_enable,
+};
+
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
+{
+ struct platform_device *pdev;
+ struct device_node *iep_np;
+ struct icss_iep *iep;
+
+ iep_np = of_parse_phandle(np, "ti,iep", idx);
+ if (!iep_np || !of_device_is_available(iep_np))
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(iep_np);
+ of_node_put(iep_np);
+
+ if (!pdev)
+ /* probably IEP not yet probed */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ iep = platform_get_drvdata(pdev);
+ if (!iep)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ device_lock(iep->dev);
+ if (iep->client_np) {
+ device_unlock(iep->dev);
+ dev_err(iep->dev, "IEP is already acquired by %s\n",
+ iep->client_np->name);
+ return ERR_PTR(-EBUSY);
+ }
+ iep->client_np = np;
+ device_unlock(iep->dev);
+ get_device(iep->dev);
+
+ return iep;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_idx);
+
+struct icss_iep *icss_iep_get(struct device_node *np)
+{
+ return icss_iep_get_idx(np, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get);
+
+void icss_iep_put(struct icss_iep *iep)
+{
+ device_lock(iep->dev);
+ iep->client_np = NULL;
+ device_unlock(iep->dev);
+ put_device(iep->dev);
+}
+EXPORT_SYMBOL_GPL(icss_iep_put);
+
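For context, a sketch of how a client driver might consume this get/put pair from its probe path; example_client_probe and its arguments are hypothetical, and error handling is trimmed to the essentials:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/of.h>

	#include "icss_iep.h"

	static int example_client_probe(struct device *dev, struct device_node *np)
	{
		struct icss_iep *iep = icss_iep_get_idx(np, 0);

		if (IS_ERR(iep))	/* -EPROBE_DEFER until the IEP has probed */
			return dev_err_probe(dev, PTR_ERR(iep), "iep get failed\n");

		/* ...hand iep to icss_iep_init() or icss_iep_init_fw()... */

		icss_iep_put(iep);	/* release again on teardown */
		return 0;
	}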
+void icss_iep_init_fw(struct icss_iep *iep)
+{
+ /* start IEP for FW use in raw 64bit mode, no PTP support */
+ iep->clk_tick_time = iep->def_inc;
+ iep->cycle_time_ns = 0;
+ iep->ops = NULL;
+ iep->clockops_data = NULL;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_init_fw);
+
+void icss_iep_exit_fw(struct icss_iep *iep)
+{
+ icss_iep_disable(iep);
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
+
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns)
+{
+ int ret = 0;
+
+ iep->cycle_time_ns = cycle_time_ns;
+ iep->clk_tick_time = iep->def_inc;
+ iep->ops = clkops;
+ iep->clockops_data = clockops_data;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
+ !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
+ goto skip_perout;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ iep->ptp_info.n_per_out = 1;
+ iep->ptp_info.pps = 1;
+ }
+
+ if (iep->ops && iep->ops->extts_enable)
+ iep->ptp_info.n_ext_ts = 2;
+
+skip_perout:
+ if (cycle_time_ns)
+ icss_iep_enable_shadow_mode(iep);
+ else
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, ktime_get_real_ns());
+
+ iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
+ if (IS_ERR(iep->ptp_clock)) {
+ ret = PTR_ERR(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icss_iep_init);
+
+int icss_iep_exit(struct icss_iep *iep)
+{
+ if (iep->ptp_clock) {
+ ptp_clock_unregister(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ }
+ icss_iep_disable(iep);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit);
+
+static int icss_iep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct icss_iep *iep;
+ struct clk *iep_clk;
+
+ iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
+ if (!iep)
+ return -ENOMEM;
+
+ iep->dev = dev;
+ iep->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iep->base))
+ return -ENODEV;
+
+ iep_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(iep_clk))
+ return PTR_ERR(iep_clk);
+
+ iep->refclk_freq = clk_get_rate(iep_clk);
+
+ iep->def_inc = NSEC_PER_SEC / iep->refclk_freq; /* ns per clock tick */
+ if (iep->def_inc > IEP_MAX_DEF_INC) {
+ dev_err(dev, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n",
+ iep->def_inc);
+ return -EINVAL;
+ }
+
+ iep->plat_data = device_get_match_data(dev);
+ if (!iep->plat_data)
+ return -EINVAL;
+
+ iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
+ if (IS_ERR(iep->map)) {
+ dev_err(dev, "Failed to create regmap for IEP %ld\n",
+ PTR_ERR(iep->map));
+ return PTR_ERR(iep->map);
+ }
+
+ iep->ptp_info = icss_iep_ptp_info;
+ mutex_init(&iep->ptp_clk_mutex);
+ spin_lock_init(&iep->irq_lock);
+ dev_set_drvdata(dev, iep);
+ icss_iep_disable(iep);
+
+ return 0;
+}
+
+static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int icss_iep_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct icss_iep *iep = context;
+
+ writel(val, iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static int icss_iep_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct icss_iep *iep = context;
+
+ *val = readl(iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static struct regmap_config am654_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am654_icss_iep_valid_reg,
+ .readable_reg = am654_icss_iep_valid_reg,
+ .fast_io = 1,
+};
+
+static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
+ ICSS_IEP_SHADOW_MODE_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
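The regmap built in probe above is index-based: callers pass logical register identifiers, and the per-SoC reg_offs[] table maps each one to an MMIO offset, so a single driver core can serve layouts that differ between ICSSG revisions. A toy illustration of the indirection (the base address and the trimmed-down register enum are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	enum { REG_GLOBAL_CFG, REG_COMPEN, REG_COUNT0, NUM_REGS };

	static const uint16_t am654_offs[NUM_REGS] = {
		[REG_GLOBAL_CFG] = 0x00,
		[REG_COMPEN]     = 0x08,
		[REG_COUNT0]     = 0x10,
	};

	int main(void)
	{
		uintptr_t base = 0x2a000000;	/* hypothetical IEP base */

		/* a write to REG_COMPEN lands at base + 0x08 */
		printf("COMPEN -> %#lx\n",
		       (unsigned long)(base + am654_offs[REG_COMPEN]));
		return 0;
	}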
+static const struct of_device_id icss_iep_of_match[] = {
+ {
+ .compatible = "ti,am654-icss-iep",
+ .data = &am654_icss_iep_plat_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, icss_iep_of_match);
+
+static struct platform_driver icss_iep_driver = {
+ .driver = {
+ .name = "icss-iep",
+ .of_match_table = icss_iep_of_match,
+ },
+ .probe = icss_iep_probe,
+};
+module_platform_driver(icss_iep_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI ICSS IEP driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h
new file mode 100644
index 000000000000..803a4b714893
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSS_IEP_H
+#define __NET_TI_ICSS_IEP_H
+
+#include <linux/mutex.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/regmap.h>
+
+struct icss_iep;
+extern const struct icss_iep_clockops prueth_iep_clockops;
+
+/* Firmware specific clock operations */
+struct icss_iep_clockops {
+ void (*settime)(void *clockops_data, u64 ns);
+ void (*adjtime)(void *clockops_data, s64 delta);
+ u64 (*gettime)(void *clockops_data, struct ptp_system_timestamp *sts);
+ int (*perout_enable)(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp);
+ int (*extts_enable)(void *clockops_data, u32 index, int on);
+};
+
+struct icss_iep *icss_iep_get(struct device_node *np);
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx);
+void icss_iep_put(struct icss_iep *iep);
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns);
+int icss_iep_exit(struct icss_iep *iep);
+int icss_iep_get_count_low(struct icss_iep *iep);
+int icss_iep_get_count_hi(struct icss_iep *iep);
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep);
+void icss_iep_init_fw(struct icss_iep *iep);
+void icss_iep_exit_fw(struct icss_iep *iep);
+
+#endif /* __NET_TI_ICSS_IEP_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index ab648d3efe85..933b84666574 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -210,6 +210,10 @@ void icssg_config_ipg(struct prueth_emac *emac)
case SPEED_100:
icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
break;
+ case SPEED_10:
+ /* IPG for 10M is the same as for 100M */
+ icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ break;
default:
/* Other links speeds not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
@@ -440,6 +444,9 @@ void icssg_config_set_speed(struct prueth_emac *emac)
case SPEED_100:
fw_speed = FW_LINK_SPEED_100M;
break;
+ case SPEED_10:
+ fw_speed = FW_LINK_SPEED_10M;
+ break;
default:
/* Other links speeds not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 02c312f01d10..a27ec1dcc8d5 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -109,6 +109,26 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
*(data++) = emac->stats[i];
}
+static int emac_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
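The capabilities advertised here pair with the usual SO_TIMESTAMPING flow on the user side. A minimal sketch (hardware TX completions land on the socket error queue; error handling and the actual send are trimmed):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	int main(void)
	{
		int val = SOF_TIMESTAMPING_TX_HARDWARE |
			  SOF_TIMESTAMPING_RX_HARDWARE |
			  SOF_TIMESTAMPING_RAW_HARDWARE;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)))
			perror("SO_TIMESTAMPING");
		/* ...send; timestamps arrive via recvmsg(fd, ..., MSG_ERRQUEUE) */
		return 0;
	}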
static int emac_set_channels(struct net_device *ndev,
struct ethtool_channels *ch)
{
@@ -176,6 +196,7 @@ const struct ethtool_ops icssg_ethtool_ops = {
.get_sset_count = emac_get_sset_count,
.get_ethtool_stats = emac_get_ethtool_stats,
.get_strings = emac_get_strings,
+ .get_ts_info = emac_get_ts_info,
.get_channels = emac_get_channels,
.set_channels = emac_set_channels,
.get_link_ksettings = emac_get_link_ksettings,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 47b941fb0198..410612f43cbd 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -56,6 +56,8 @@
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+
static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
@@ -471,6 +473,37 @@ static int prueth_dma_rx_push(struct prueth_emac *emac,
desc_rx, desc_dma);
}
+static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+
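A worked example of the unpacking above, with made-up register words: bits 19:0 of the low word count nanoseconds within the current 1 ms cycle, while the remaining bits assemble the cycle counter and its rollover count:

	#include <stdint.h>
	#include <stdio.h>

	#define IEP_CYCLE_NS 1000000u	/* IEP_DEFAULT_CYCLE_TIME_NS */

	int main(void)
	{
		uint32_t lo = (1234u << 20) | 5678u;	/* cycle 1234, 5678 ns in */
		uint32_t hi = 0, hi_sw = 0;
		uint32_t count_lo = lo & 0xfffffu;
		uint32_t count_hi = (hi & 0xfffu) << 12 | lo >> 20;
		uint32_t rollover = hi >> 11;
		uint64_t ns;

		ns = (uint64_t)rollover << 23 | (count_hi + hi_sw);
		ns = ns * IEP_CYCLE_NS + count_lo;
		printf("%llu ns\n", (unsigned long long)ns);	/* 1234005678 */
		return 0;
	}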
+static void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
@@ -480,6 +513,7 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
struct sk_buff *skb, *new_skb;
dma_addr_t desc_dma, buf_dma;
void **swdata;
+ u32 *psdata;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -497,6 +531,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -557,6 +596,86 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static int emac_get_tx_ts(struct prueth_emac *emac,
+ struct emac_tx_ts_response *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+ /* return the buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
+
+ return 0;
+}
+
+static void tx_ts_work(struct prueth_emac *emac)
+{
+ struct skb_shared_hwtstamps ssh;
+ struct emac_tx_ts_response tsr;
+ struct sk_buff *skb;
+ int ret = 0;
+ u32 hi_sw;
+ u64 ns;
+
+ /* There may be more than one pending request */
+ while (1) {
+ ret = emac_get_tx_ts(emac, &tsr);
+ if (ret) /* nothing more */
+ break;
+
+ if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
+ !emac->tx_ts_skb[tsr.cookie]) {
+ netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ tsr.cookie);
+ break;
+ }
+
+ skb = emac->tx_ts_skb[tsr.cookie];
+ emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */
+ if (!skb) {
+ netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
+ break;
+ }
+
+ hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+
+ if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */
+ break;
+ }
+}
+
+static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
+
/**
* emac_ndo_start_xmit - EMAC Transmit function
* @skb: SKB pointer
@@ -577,6 +696,8 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
int i, ret = 0, q_idx;
+ bool in_tx_ts = false;
+ int tx_ts_cookie;
void **swdata;
u32 pkt_len;
u32 *epib;
@@ -608,6 +729,18 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
epib = first_desc->epib;
epib[0] = 0;
epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+ in_tx_ts = true;
+ }
+ }
/* set dst tag to indicate internal qid at the firmware which is at
* bit8..bit15. bit0..bit7 indicates port num for directed
@@ -629,7 +762,7 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
if (!next_desc) {
netdev_err(ndev,
"tx: failed to allocate frag. descriptor\n");
- goto free_desc_stop_q_busy;
+ goto free_desc_stop_q_busy_cleanup_tx_ts;
}
buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
@@ -638,7 +771,7 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
netdev_err(ndev, "tx: Failed to map skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ret = NETDEV_TX_OK;
- goto drop_free_descs;
+ goto cleanup_tx_ts;
}
cppi5_hdesc_reset_hbdesc(next_desc);
@@ -670,6 +803,9 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
goto drop_free_descs;
}
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
netif_tx_stop_queue(netif_txq);
/* Barrier, so that stop_queue visible to other cpus */
@@ -682,6 +818,12 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
return NETDEV_TX_OK;
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
drop_free_descs:
prueth_xmit_free(tx_chn, first_desc);
@@ -694,7 +836,11 @@ drop_free_skb:
return ret;
-free_desc_stop_q_busy:
+free_desc_stop_q_busy_cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
prueth_xmit_free(tx_chn, first_desc);
drop_stop_q_busy:
@@ -717,6 +863,16 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ /* currently only TX timestamp is being returned */
+ tx_ts_work(emac);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
@@ -820,6 +976,18 @@ static void prueth_emac_stop(struct prueth_emac *emac)
rproc_shutdown(prueth->pru[slice]);
}
+static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+
/* called back by PHY layer if there is change in link state of hw port*/
static void emac_adjust_link(struct net_device *ndev)
{
@@ -881,6 +1049,7 @@ static void emac_adjust_link(struct net_device *ndev)
netif_tx_wake_all_queues(ndev);
} else {
netif_tx_stop_all_queues(ndev);
+ prueth_cleanup_tx_ts(emac);
}
}
@@ -980,7 +1149,6 @@ static int emac_phy_connect(struct prueth_emac *emac)
/* remove unsupported modes */
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
@@ -992,6 +1160,139 @@ static int emac_phy_connect(struct prueth_emac *emac)
return 0;
}
+static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
+{
+ u32 hi_rollover_count, hi_rollover_count_r;
+ struct prueth_emac *emac = clockops_data;
+ struct prueth *prueth = emac->prueth;
+ void __iomem *fw_hi_r_count_addr;
+ void __iomem *fw_count_hi_addr;
+ u32 iepcount_hi, iepcount_hi_r;
+ unsigned long flags;
+ u32 iepcount_lo;
+ u64 ts = 0;
+
+ fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
+ fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
+
+ local_irq_save(flags);
+ do {
+ iepcount_hi = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi += readl(fw_count_hi_addr);
+ hi_rollover_count = readl(fw_hi_r_count_addr);
+ ptp_read_system_prets(sts);
+ iepcount_lo = icss_iep_get_count_low(emac->iep);
+ ptp_read_system_postts(sts);
+
+ iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi_r += readl(fw_count_hi_addr);
+ hi_rollover_count_r = readl(fw_hi_r_count_addr);
+ } while ((iepcount_hi_r != iepcount_hi) ||
+ (hi_rollover_count != hi_rollover_count_r));
+ local_irq_restore(flags);
+
+ ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
+ ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
+
+ return ts;
+}
+
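The loop above is the classic consistent read of a split counter: sample the high part, then the low part, then the high part again, and retry if the two high samples disagree (the low word rolled over mid-read). Stripped to its generic shape, as a sketch with no driver specifics:

	#include <stdint.h>

	struct split_counter {
		volatile uint32_t hi;
		volatile uint32_t lo;
	};

	static uint64_t read_consistent(struct split_counter *c)
	{
		uint32_t hi, lo, hi2;

		do {
			hi = c->hi;
			lo = c->lo;
			hi2 = c->hi;	/* re-sample to detect rollover */
		} while (hi != hi2);

		return (uint64_t)hi << 32 | lo;
	}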
+static void prueth_iep_settime(void *clockops_data, u64 ns)
+{
+ struct icssg_setclock_desc __iomem *sc_descp;
+ struct prueth_emac *emac = clockops_data;
+ struct icssg_setclock_desc sc_desc;
+ u64 cyclecount;
+ u32 cycletime;
+ int timeout;
+
+ if (!emac->fw_running)
+ return;
+
+ sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
+
+ cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
+ cyclecount = ns / cycletime;
+
+ memset(&sc_desc, 0, sizeof(sc_desc));
+ sc_desc.margin = cycletime - 1000;
+ sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
+ sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
+ sc_desc.iepcount_set = ns % cycletime;
+ sc_desc.CMP0_current = cycletime - 4; /* count from 0 to (cycle time) - 4 */
+
+ memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
+
+ writeb(1, &sc_descp->request);
+
+ timeout = 5; /* fw should take 2-3 ms */
+ while (timeout--) {
+ if (readb(&sc_descp->acknowledgment))
+ return;
+
+ usleep_range(500, 1000);
+ }
+
+ dev_err(emac->prueth->dev, "settime timeout\n");
+}
+
+static int prueth_perout_enable(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp)
+{
+ struct prueth_emac *emac = clockops_data;
+ u32 reduction_factor = 0, offset = 0;
+ struct timespec64 ts;
+ u64 ns_period;
+
+ if (!on)
+ return 0;
+
+ /* Firmware-specific setup for PPS/PEROUT handling */
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ /* f/w doesn't support period less than cycle time */
+ if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
+ return -ENXIO;
+
+ reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
+ offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
+
+ /* f/w requires some headroom within a cycle so CMP can trigger
+ * after SYNC is enabled: at least 1 us, and 5 us is used here
+ */
+ if (offset < 5 * NSEC_PER_USEC)
+ offset = 5 * NSEC_PER_USEC;
+
+ /* if offset is close to cycle time then we will miss
+ * the CMP event for last tick when IEP rolls over.
+ * In normal mode, IEP tick is 4ns.
+ * In slow compensation it could be 0ns or 8ns at
+ * every slow compensation cycle.
+ */
+ if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
+ offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
+
+ /* we're in shadow mode so need to set upper 32-bits */
+ *cmp = (u64)offset << 32;
+
+ writel(reduction_factor, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
+
+ writel(0, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+
+ return 0;
+}
+
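Plugging numbers into the math above, assuming the default 1 ms IEP cycle: a 1 s PEROUT period gives a firmware reduction factor of 1000 and a 0 ns in-cycle offset, which the clamps then raise to 5 us. A standalone sketch of that arithmetic:

	#include <stdio.h>

	#define CYCLE_NS 1000000u		/* 1 ms IEP cycle */
	#define NSEC_PER_USEC 1000u

	int main(void)
	{
		unsigned long long ns_period = 1000000000ULL;	/* 1 s */
		unsigned int reduction = ns_period / CYCLE_NS;	/* 1000 */
		unsigned int offset = ns_period % CYCLE_NS;	/* 0 */

		if (offset < 5 * NSEC_PER_USEC)		/* lower clamp */
			offset = 5 * NSEC_PER_USEC;
		if (offset > CYCLE_NS - 8)		/* upper clamp */
			offset = CYCLE_NS - 8;

		printf("reduction=%u offset=%u cmp=%#llx\n", reduction,
		       offset, (unsigned long long)offset << 32);
		return 0;
	}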
+const struct icss_iep_clockops prueth_iep_clockops = {
+ .settime = prueth_iep_settime,
+ .gettime = prueth_iep_gettime,
+ .perout_enable = prueth_perout_enable,
+};
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -1066,10 +1367,20 @@ static int emac_ndo_open(struct net_device *ndev)
icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ if (!prueth->emacs_initialized) {
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+ IRQF_ONESHOT, dev_name(dev), emac);
+ if (ret)
+ goto stop;
+
/* Prepare RX */
ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
if (ret)
- goto stop;
+ goto free_tx_ts_irq;
ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
if (ret)
@@ -1102,6 +1413,8 @@ reset_tx_chan:
prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+free_tx_ts_irq:
+ free_irq(emac->tx_ts_irq, emac);
stop:
prueth_emac_stop(emac);
free_rx_irq:
@@ -1173,6 +1486,14 @@ static int emac_ndo_stop(struct net_device *ndev)
/* stop PRUs */
prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ icss_iep_exit(emac->iep);
+
+ free_irq(emac->tx_ts_irq, emac);
+
free_irq(emac->rx_chns.irq[rx_flow], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
prueth_cleanup_tx_chns(emac);
@@ -1235,8 +1556,79 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
+static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return emac_get_ts_config(ndev, ifr);
+ case SIOCSHWTSTAMP:
+ return emac_set_ts_config(ndev, ifr);
+ default:
+ break;
+ }
+
return phy_do_ioctl(ndev, ifr, cmd);
}
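From user space these two paths are driven with an ifreq carrying a hwtstamp_config. A hedged sketch, with "eth1" as a placeholder interface name (SIOCSHWTSTAMP needs CAP_NET_ADMIN):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth1", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
			perror("SIOCSHWTSTAMP");
		else	/* the driver may have rewritten rx_filter */
			printf("tx_type=%d rx_filter=%d\n",
			       cfg.tx_type, cfg.rx_filter);
		return 0;
	}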
@@ -1316,6 +1708,7 @@ static int prueth_netdev_init(struct prueth *prueth,
struct prueth_emac *emac;
struct net_device *ndev;
enum prueth_port port;
+ const char *irq_name;
enum prueth_mac mac;
port = prueth_node_port(eth_node);
@@ -1355,6 +1748,15 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->tx_ch_num = 1;
+ irq_name = "tx_ts0";
+ if (emac->port_id == PRUETH_PORT_MII1)
+ irq_name = "tx_ts1";
+ emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
+ if (emac->tx_ts_irq < 0) {
+ ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
+ goto free;
+ }
+
SET_NETDEV_DEV(ndev, prueth->dev);
spin_lock_init(&emac->lock);
mutex_init(&emac->cmd_lock);
@@ -1680,14 +2082,38 @@ static int prueth_probe(struct platform_device *pdev)
dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
prueth->msmcram.va, prueth->msmcram.size);
+ prueth->iep0 = icss_iep_get_idx(np, 0);
+ if (IS_ERR(prueth->iep0)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
+ prueth->iep0 = NULL;
+ goto free_pool;
+ }
+
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
+ goto free_pool;
+ }
+
+ if (prueth->pdata.quirk_10m_link_issue) {
+ /* Enable IEP1 for FW in 64-bit mode as a workaround for the 10M FD
+ * link detect issue under TX traffic.
+ */
+ icss_iep_init_fw(prueth->iep1);
+ }
+
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
if (ret) {
dev_err_probe(dev, ret, "netdev init %s failed\n",
eth0_node->name);
- goto netdev_exit;
+ goto exit_iep;
}
+ prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
if (eth1_node) {
@@ -1697,6 +2123,8 @@ static int prueth_probe(struct platform_device *pdev)
eth1_node->name);
goto netdev_exit;
}
+
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
/* register the network devices */
@@ -1754,6 +2182,11 @@ netdev_exit:
prueth_netdev_exit(prueth, eth_node);
}
+exit_iep:
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
+
+free_pool:
gen_pool_free(prueth->sram_pool,
(unsigned long)prueth->msmcram.va, msmc_ram_size);
@@ -1798,6 +2231,12 @@ static void prueth_remove(struct platform_device *pdev)
prueth_netdev_exit(prueth, eth_node);
}
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
+
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
gen_pool_free(prueth->sram_pool,
(unsigned long)prueth->msmcram.va,
MSMC_RAM_SIZE);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index a8ce4d01ef16..3fe80a8758d3 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -35,6 +35,7 @@
#include <net/devlink.h>
#include "icssg_config.h"
+#include "icss_iep.h"
#include "icssg_switch_map.h"
#define PRUETH_MAX_MTU (2000 - ETH_HLEN - ETH_FCS_LEN)
@@ -122,6 +123,8 @@ struct prueth_rx_chn {
*/
#define PRUETH_MAX_TX_QUEUES 4
+#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+
/* data for each emac port */
struct prueth_emac {
bool fw_running;
@@ -139,6 +142,9 @@ struct prueth_emac {
struct device_node *phy_node;
phy_interface_t phy_if;
enum prueth_port port_id;
+ struct icss_iep *iep;
+ unsigned int rx_ts_enabled : 1;
+ unsigned int tx_ts_enabled : 1;
/* DMA related */
struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES];
@@ -150,7 +156,15 @@ struct prueth_emac {
spinlock_t lock; /* serialize access */
- unsigned long state;
+ /* TX HW Timestamping */
+ /* TX TS cookie will be index to the tx_ts_skb array */
+ struct sk_buff *tx_ts_skb[PRUETH_MAX_TX_TS_REQUESTS];
+ atomic_t tx_ts_pending;
+ int tx_ts_irq;
+
+ u8 cmd_seq;
+ /* shutdown related */
+ u32 cmd_data[4];
struct completion cmd_complete;
/* Mutex to serialize access to firmware command interface */
struct mutex cmd_lock;
@@ -193,6 +207,8 @@ struct prueth_pdata {
* @pdata: pointer to platform data for ICSSG driver
* @icssg_hwcmdseq: seq counter or HWQ messages
* @emacs_initialized: num of EMACs/ext ports that are up/running
+ * @iep0: pointer to IEP0 device
+ * @iep1: pointer to IEP1 device
*/
struct prueth {
struct device *dev;
@@ -214,8 +230,16 @@ struct prueth {
struct platform_device *pdev;
struct prueth_pdata pdata;
u8 icssg_hwcmdseq;
-
int emacs_initialized;
+ struct icss_iep *iep0;
+ struct icss_iep *iep1;
+};
+
+struct emac_tx_ts_response {
+ u32 reserved[2];
+ u32 cookie;
+ u32 lo_ts;
+ u32 hi_ts;
};
/* get PRUSS SLICE number from prueth_emac */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index bb234cf0cea0..332c853ca99b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9832,6 +9832,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
{}
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 6eef6bc430e2..9d248ba1c0b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le {
* fixed parameter portion is assumed, otherwise
* ssid in the fixed portion is ignored
*/
- __le16 channel_list[1]; /* list of chanspecs */
+ union {
+ __le16 padding; /* Reserve space for at least 1 entry for abort
+ * which uses an on stack brcmf_scan_params_v2_le
+ */
+ DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */
+ };
};
struct brcmf_scan_results {
@@ -702,7 +707,7 @@ struct brcmf_sta_info_le {
struct brcmf_chanspec_list {
__le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
+ __le32 element[]; /* variable length uint32 list */
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index f5e08988dc7b..06d6f7f66430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 5c719636c9bd..a5348b015310 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -819,7 +819,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)((u8 *)mvm->nvm_data->channels + 1);
+ (void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 5449deb3c2d6..46e207211f21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
mvmvif->associated = vif->cfg.assoc;
- if (!(changes & BSS_CHANGED_ASSOC))
- return;
-
- if (vif->cfg.assoc) {
- /* clear statistics to get clean beacon counter */
- iwl_mvm_request_statistics(mvm, true);
- iwl_mvm_sf_update(mvm, vif, false);
- iwl_mvm_power_vif_assoc(mvm, vif);
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- memset(&mvmvif->link[i]->beacon_stats, 0,
- sizeof(mvmvif->link[i]->beacon_stats));
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
- if (vif->p2p) {
- iwl_mvm_update_smps(mvm, vif,
- IWL_MVM_SMPS_REQ_PROT,
- IEEE80211_SMPS_DYNAMIC, i);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
- rcu_read_lock();
- link_conf = rcu_dereference(vif->link_conf[i]);
- if (link_conf && !link_conf->dtim_period)
- protect = true;
- rcu_read_unlock();
- }
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- protect) {
- /* If we're not restarting and still haven't
- * heard a beacon (dtim period unknown) then
- * make sure we still have enough minimum time
- * remaining in the time event, since the auth
- * might actually have taken quite a while
- * (especially for SAE) and so the remaining
- * time could be small without us having heard
- * a beacon yet.
+ /* If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
- iwl_mvm_sf_update(mvm, vif, false);
-
- /* FIXME: need to decide about misbehaving AP handling */
- iwl_mvm_power_vif_assoc(mvm, vif);
- } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
- iwl_mvm_mei_host_disassociated(mvm);
-
- /* If update fails - SF might be running in associated
- * mode while disassociated - which is forbidden.
- */
- ret = iwl_mvm_sf_update(mvm, vif, false);
- WARN_ONCE(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status),
- "Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
}
- iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ if (changes & BSS_CHANGED_PS) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
}
static void
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index c1d9ce753468..3cbe2c0b8d6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
- if (version < 12) {
+ if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
struct iwl_mvm_vif_link_info *link_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ec18e348553d..674ddf951b79 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1636,6 +1636,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
@@ -1988,6 +1989,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
if (!is_flush)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 391793a16adc..10690e82358b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
- while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ while (tlv_buf_left > sizeof(*tlv_rxba)) {
tlv_type = le16_to_cpu(tlv_rxba->header.type);
tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(tlv_rxba->header), tlv_len),
+ tlv_buf_left);
+ return;
+ }
+
if (tlv_type != TLV_TYPE_RXBA_SYNC) {
mwifiex_dbg(priv->adapter, ERROR,
"Wrong TLV id=0x%x\n", tlv_type);
@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
+ tlv_buf_left);
+ return;
+ }
+
mwifiex_dbg(priv->adapter, INFO,
"%pM tid=%d seq_num=%d bitmap_len=%d\n",
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
@@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index f2168fac95ed..8e6db904e5b2 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync {
u8 reserved;
__le16 seq_num;
__le16 bitmap_len;
- u8 bitmap[1];
+ u8 bitmap[];
} __packed;
struct chan_band_param_set {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 65420ad67416..257737137cd7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
- if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+ rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
- sizeof(bridge_tunnel_header))) ||
- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
- sizeof(rfc1042_header)) &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 05d9ab3ce819..dc8f4e157eb2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
if (!list_empty(&dev->rxwi_cache)) {
t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
list);
list_del(&t->list);
}
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
return t;
}
@@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
if (!t)
return;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
list_add(&t->list, &dev->rxwi_cache);
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 0acabba2d1a5..5d402cf2951c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
- u16 val;
u8 lna;
- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- *lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(s8) * 3);
-
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index d5809408d1d3..8c01855885ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
- u8 lna;
+ bool use_lna;
+ u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 3642a2c7f80c..2434e2480cbe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -46,6 +46,7 @@ struct rtw8723du_efuse {
u8 vender_id[2]; /* 0x100 */
u8 product_id[2]; /* 0x102 */
u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
};