Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 56
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 111
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 582
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 31
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 7
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 62
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 115
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 41
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 7
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 37
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.h | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_fw.c | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 29
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 47
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 36
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 9
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 5
97 files changed, 1208 insertions, 633 deletions
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0c12cf7bda50..3f65f2b370c5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2543,8 +2543,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
- (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 098b0328e3cb..ff9f96de74b8 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2312,8 +2312,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
- (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74c1778d841e..b455b60a5434 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2383,7 +2383,8 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fa147865e33f..7c21aaa8b9af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1160,16 +1160,6 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
schedule_work(&bp->sp_task);
}
-static void bnxt_cancel_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- flush_workqueue(bnxt_pf_wq);
- } else {
- cancel_work_sync(&bp->sp_task);
- cancel_delayed_work_sync(&bp->fw_reset_task);
- }
-}
-
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
@@ -4109,7 +4099,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
- if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+ if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
@@ -4362,7 +4353,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
- if (BNXT_NO_FW_ACCESS(bp))
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
return -EBUSY;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
@@ -7766,6 +7758,7 @@ static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
u64 sw_tmp;
+ hw &= mask;
sw_tmp = (*sw & ~mask) | hw;
if (hw < (*sw & mask))
sw_tmp += mask + 1;
@@ -9789,7 +9782,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
+ rc = -EIO;
+ if (!rc)
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
dev_close(bp->dev);
@@ -12108,15 +12104,17 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
-
- bnxt_dl_fw_reporters_destroy(bp, true);
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ /* Flush any pending tasks */
+ cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ bp->sp_event = 0;
+
+ bnxt_dl_fw_reporters_destroy(bp, true);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -12860,6 +12858,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ if (state == pci_channel_io_frozen)
+ set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+
if (netif_running(netdev))
bnxt_close(netdev);
@@ -12886,7 +12887,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0;
+ int err = 0, off;
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12898,6 +12899,20 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ /* Upon fatal error, our device internal logic that latches to
+ * BAR value is getting reset and will restore only upon
+ * rewritting the BARs.
+ *
+ * As pci_restore_state() does not re-write the BARs if the
+ * value is same as saved value earlier, driver needs to
+ * write the BARs to 0 to force restore, in case of fatal error.
+ */
+ if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
+ &bp->state)) {
+ for (off = PCI_BASE_ADDRESS_0;
+ off <= PCI_BASE_ADDRESS_5; off += 4)
+ pci_write_config_dword(bp->pdev, off, 0);
+ }
pci_restore_state(pdev);
pci_save_state(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 21ef1c21f602..47b3c3127879 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1781,6 +1781,7 @@ struct bnxt {
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 53687bc7fcf5..1471c9a36238 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2079,6 +2079,9 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_input req = {0};
int rc;
+ if (BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -2997,7 +3000,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
start, length, data);
}
return rc;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 883e47c5b1a7..286f0341bdf8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1929,7 +1929,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
- bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3352dad6ca99..27308600da15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2124,6 +2124,9 @@ void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 send_len);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0273f40b85f7..17410fe86626 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3573,6 +3573,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "TX sw fallback : %20llu\n",
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
while (i < MAX_NPORTS) {
ktls_port = &adap->ch_ktls_stats.ktls_port[i];
seq_printf(seq, "Port %d\n", i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 6ec5f2f26f05..4e55f7081644 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
- err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
- if (err)
- goto smac_err;
-
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
+ if (err)
+ goto smac_err;
+
+ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (!err)
return 0;
@@ -862,6 +862,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -879,7 +880,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
+ fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
@@ -1323,11 +1324,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
@@ -1363,11 +1361,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
@@ -2039,6 +2034,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
}
return;
}
+ switch (f->fs.action) {
+ case FILTER_PASS:
+ if (f->fs.dirsteer)
+ set_tcb_tflag(adap, f, tid,
+ TF_DIRECT_STEER_S, 1, 1);
+ break;
+ case FILTER_DROP:
+ set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
+ break;
+ case FILTER_SWITCH:
+ set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
+ break;
+ }
+
break;
default:
@@ -2106,22 +2115,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
- int err = 0;
-
- if (f->fs.newsmac)
- err = configure_filter_smac(adap, f);
-
- if (!err) {
- f->pending = 0; /* async setup completed */
- f->valid = 1;
- if (ctx) {
- ctx->result = 0;
- ctx->tid = idx;
- }
- } else {
- clear_filter(adap, f);
- if (ctx)
- ctx->result = err;
+ f->pending = 0; /* async setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
}
} else {
/* Something went wrong. Issue a warning about the
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a952fe198eb9..7fd264a6d085 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1176,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
+ cxgb4_is_ktls_skb(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b169776ab484..1b49f2fa9b18 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -388,6 +388,7 @@ struct ch_ktls_stats_debug {
atomic64_t ktls_tx_retransmit_pkts;
atomic64_t ktls_tx_complete_pkts;
atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_fallback;
};
#endif
@@ -493,6 +494,11 @@ struct cxgb4_uld_info {
#endif
};
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a9e9c7ae565d..196652a114c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -890,6 +890,114 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
}
EXPORT_SYMBOL(cxgb4_write_sgl);
+/* cxgb4_write_partial_sgl - populate SGL for partial packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ * @start: start offset in the SKB where partial data starts
+ * @len: length of data from @start to send out
+ *
+ * This API will handle sending out partial data of a skb if required.
+ * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
+ * and @len will decide how much data after @start offset to send out.
+ */
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 len)
+{
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u8 i = 0, frag_idx = 0, nfrags = 0;
+ skb_frag_t *frag;
+
+ /* Fill the first SGL either from linear data or from partial
+ * frag based on @start.
+ */
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ sgl->len0 = htonl(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ len -= frag_size;
+ nfrags++;
+ } else {
+ start -= skb_linear_data_len;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ /* find the first frag */
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+
+ frag_size = min(len, skb_frag_size(frag) - start);
+ sgl->len0 = cpu_to_be32(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+
+ /* If the entire partial data fit in one SGL, then send it out
+ * now.
+ */
+ if (!len)
+ goto done;
+
+ /* Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ /* If the skb couldn't fit in first SGL completely, fill the
+ * rest of the frags in subsequent SGLs. Note that each SGL
+ * pair can store 2 frags.
+ */
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ to->len[i & 1] = cpu_to_be32(frag_size);
+ to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
+ if (i && (i & 1))
+ to++;
+ nfrags++;
+ frag_idx++;
+ i++;
+ len -= frag_size;
+ }
+
+ /* If we ended in an odd boundary, then set the second SGL's
+ * length in the pair to 0.
+ */
+ if (i & 1)
+ to->len[1] = cpu_to_be32(0);
+
+ /* Copy from temporary buffer to Tx ring, in case we hit the
+ * end of the queue in the middle of writing the SGL.
+ */
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+
+ /* 0-pad to multiple of 16 */
+ if ((uintptr_t)end & 8)
+ *end = 0;
+done:
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
+}
+EXPORT_SYMBOL(cxgb4_write_partial_sgl);
+
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
* data from the FIFO instead of from Host.
@@ -1422,7 +1530,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif /* CHELSIO_IPSEC_INLINE */
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
- if (skb->decrypted)
+ if (cxgb4_is_ktls_skb(skb) &&
+ (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 50232e063f49..92473dda55d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -50,6 +50,10 @@
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+#define TF_DROP_S 22
+#define TF_DIRECT_STEER_S 23
+#define TF_LPBK_S 59
+
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 5195f692f14d..c24485c0d512 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -14,6 +14,50 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+/* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
+ * @skb: skb
+ * @start: start offset.
+ * @len: how much data to send after @start
+ */
+static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+{
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ u8 nfrags = 0, frag_idx = 0;
+ skb_frag_t *frag;
+
+ /* if its a linear skb then return 1 */
+ if (!skb_is_nonlinear(skb))
+ return 1;
+
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ start = 0;
+ } else {
+ start -= skb_linear_data_len;
+
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+ frag_size = min(len, skb_frag_size(frag) - start);
+ }
+ len -= frag_size;
+ nfrags++;
+
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+ return nfrags;
+}
+
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
@@ -689,7 +733,8 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
}
static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
- u32 tid, void *pos, u16 word, u64 mask,
+ u32 tid, void *pos, u16 word,
+ struct sge_eth_txq *q, u64 mask,
u64 val, u32 reply)
{
struct cpl_set_tcb_field_core *cpl;
@@ -698,7 +743,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
/* ULP_TXPKT */
txpkt = pos;
- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
/* ULPTX_IDATA sub-command */
@@ -753,7 +801,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
} else {
u8 buf[48] = {0};
- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
mask, val, reply);
return chcr_copy_to_txd(buf, &q->q, pos,
@@ -761,7 +809,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
}
}
- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
mask, val, reply);
/* check again if we are at the end of the queue */
@@ -783,11 +831,11 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
*/
static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u64 tcp_seq,
- u64 tcp_ack, u64 tcp_win)
+ u64 tcp_ack, u64 tcp_win, bool offset)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
struct ch_ktls_port_stats_debug *port_stats;
- u32 len, cpl = 0, ndesc, wr_len;
+ u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
struct fw_ulptx_wr *wr;
int credits;
void *pos;
@@ -803,6 +851,11 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
/* make space for WR, we'll fill it later when we know all the cpls
* being sent out and have complete length.
@@ -818,7 +871,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
cpl++;
}
/* reset snd una if it's a re-transmit pkt */
- if (tcp_seq != tx_info->prev_seq) {
+ if (tcp_seq != tx_info->prev_seq || offset) {
/* reset snd_una */
port_stats =
&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -827,7 +880,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&port_stats->ktls_tx_ooo);
+ if (tcp_seq != tx_info->prev_seq)
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
@@ -856,7 +910,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+ wr->flowid_len16 = htonl(wr_mid |
+ FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
ndesc = DIV_ROUND_UP(len, 64);
chcr_txq_advance(&q->q, ndesc);
@@ -866,34 +921,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
}
/*
- * chcr_ktls_skb_copy
- * @nskb - new skb where the frags to be added.
- * @skb - old skb from which frags will be copied.
- */
-static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
- __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
- }
-
- skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
- nskb->len += skb->data_len;
- nskb->data_len = skb->data_len;
- nskb->truesize += skb->data_len;
-}
-
-/*
* chcr_ktls_get_tx_flits
* returns number of flits to be sent out, it includes key context length, WR
* size and skb fragments.
*/
static unsigned int
-chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
{
- return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ return chcr_sgl_len(nr_frags) +
DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
}
@@ -957,8 +992,10 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct tcphdr *tcp;
int len16, pktlen;
struct iphdr *ip;
+ u32 wr_mid = 0;
int credits;
u8 buf[150];
+ u64 cntrl1;
void *pos;
iplen = skb_network_header_len(skb);
@@ -967,7 +1004,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb->len - skb->data_len;
+ pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -980,6 +1017,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
wr = pos;
@@ -987,7 +1029,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(ctrl));
- wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
wr->r3 = 0;
cpl = (void *)(wr + 1);
@@ -997,22 +1039,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
TXPKT_PF_V(tx_info->adap->pf));
cpl->pack = 0;
cpl->len = htons(pktlen);
- /* checksum offload */
- cpl->ctrl1 = 0;
-
- pos = cpl + 1;
memcpy(buf, skb->data, pktlen);
if (tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
#endif
}
+
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+
+ pos = cpl + 1;
+
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
* need to update the tcp sequence number as per the last packet.
@@ -1031,71 +1079,6 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return 0;
}
-/* chcr_ktls_skb_shift - Shifts request length paged data from skb to another.
- * @tgt- buffer into which tail data gets added
- * @skb- buffer from which the paged data comes from
- * @shiftlen- shift up to this many bytes
- */
-static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen)
-{
- skb_frag_t *fragfrom, *fragto;
- int from, to, todo;
-
- WARN_ON(shiftlen > skb->data_len);
-
- todo = shiftlen;
- from = 0;
- to = 0;
- fragfrom = &skb_shinfo(skb)->frags[from];
-
- while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
- fragfrom = &skb_shinfo(skb)->frags[from];
- fragto = &skb_shinfo(tgt)->frags[to];
-
- if (todo >= skb_frag_size(fragfrom)) {
- *fragto = *fragfrom;
- todo -= skb_frag_size(fragfrom);
- from++;
- to++;
-
- } else {
- __skb_frag_ref(fragfrom);
- skb_frag_page_copy(fragto, fragfrom);
- skb_frag_off_copy(fragto, fragfrom);
- skb_frag_size_set(fragto, todo);
-
- skb_frag_off_add(fragfrom, todo);
- skb_frag_size_sub(fragfrom, todo);
- todo = 0;
-
- to++;
- break;
- }
- }
-
- /* Ready to "commit" this state change to tgt */
- skb_shinfo(tgt)->nr_frags = to;
-
- /* Reposition in the original skb */
- to = 0;
- while (from < skb_shinfo(skb)->nr_frags)
- skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
-
- skb_shinfo(skb)->nr_frags = to;
-
- WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
-
- skb->len -= shiftlen;
- skb->data_len -= shiftlen;
- skb->truesize -= shiftlen;
- tgt->len += shiftlen;
- tgt->data_len += shiftlen;
- tgt->truesize += shiftlen;
-
- return shiftlen;
-}
-
/*
* chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
* received has partial end part of the record, send out the complete record, so
@@ -1111,6 +1094,8 @@ static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 tcp_seq,
+ bool is_last_wr, u32 data_len,
+ u32 skb_offset, u32 nfrags,
bool tcp_push, u32 mss)
{
u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
@@ -1126,7 +1111,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
u64 *end;
/* get the number of flits required */
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
/* number of descriptors */
ndesc = chcr_flits_to_desc(flits);
/* check if enough credits available */
@@ -1155,6 +1140,9 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (!is_last_wr)
+ skb_get(skb);
+
pos = &q->q.desc[q->q.pidx];
end = (u64 *)pos + flits;
/* FW_ULPTX_WR */
@@ -1187,7 +1175,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
- cpl->pldlen = htonl(skb->data_len);
+ cpl->pldlen = htonl(data_len);
/* encryption should start after tls header size + iv size */
cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
@@ -1229,7 +1217,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
/* CPL_TX_DATA */
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
- tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
tx_data->rsvd = htonl(tcp_seq);
@@ -1249,8 +1237,8 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
}
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1282,10 +1270,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
struct sge_eth_txq *q,
u32 tcp_seq, bool tcp_push, u32 mss,
u32 tls_rec_offset, u8 *prior_data,
- u32 prior_data_len)
+ u32 prior_data_len, u32 data_len,
+ u32 skb_offset)
{
+ u32 len16, wr_mid = 0, cipher_start, nfrags;
struct adapter *adap = tx_info->adap;
- u32 len16, wr_mid = 0, cipher_start;
unsigned int flits = 0, ndesc;
int credits, left, last_desc;
struct tx_sw_desc *sgl_sdesc;
@@ -1298,10 +1287,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
void *pos;
u64 *end;
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
/* get the number of flits required, it's a partial record so 2 flits
* (AES_BLOCK_SIZE) will be added.
*/
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
/* get the correct 8 byte IV of this record */
iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
/* If it's a middle record and not 16 byte aligned to run AES CTR, need
@@ -1373,7 +1363,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
- cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
cpl->aadstart_cipherstop_hi =
htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
cpl->cipherstop_lo_authinsert = 0;
@@ -1404,7 +1394,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
if (tcp_push)
@@ -1437,8 +1427,8 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
if (prior_data_len)
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1466,6 +1456,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct sk_buff *skb, u32 tcp_seq, u32 mss,
bool tcp_push, struct sge_eth_txq *q,
u32 port_id, u8 *prior_data,
+ u32 data_len, u32 skb_offset,
u32 prior_data_len)
{
int credits, left, len16, last_desc;
@@ -1475,14 +1466,16 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct ulptx_idata *idata;
struct ulp_txpkt *ulptx;
struct fw_ulptx_wr *wr;
- u32 wr_mid = 0;
+ u32 wr_mid = 0, nfrags;
void *pos;
u64 *end;
flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
- flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
+ flits += chcr_sgl_len(nfrags);
if (prior_data_len)
flits += 2;
+
/* WR will need len16 */
len16 = DIV_ROUND_UP(flits, 2);
/* check how many descriptors needed */
@@ -1535,7 +1528,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
tx_data = (struct cpl_tx_data *)(idata + 1);
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
/* set tcp seq number */
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
@@ -1559,8 +1552,8 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
end = pos + left;
}
/* send the complete packet including the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1568,12 +1561,96 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
return 0;
}
+static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct sge_eth_txq *q)
+{
+ u32 ctrl, iplen, maclen, wr_mid = 0, len16;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ unsigned int flits, ndesc;
+ int credits, last_desc;
+ u64 cntrl1, *end;
+ void *pos;
+
+ ctrl = sizeof(*cpl);
+ flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
+
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return -ENOMEM;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return -ENOMEM;
+ }
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
+ TXPKT_INTF_V(tx_info->tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
+ TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+ cpl->len = htons(skb->len);
+
+ pos = cpl + 1;
+
+ cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
/*
* chcr_ktls_copy_record_in_skb
* @nskb - new skb where the frags to be added.
+ * @skb - old skb, to copy socket and destructor details.
* @record - specific record which has complete 16k record in frags.
*/
static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct sk_buff *skb,
struct tls_record_info *record)
{
int i = 0;
@@ -1588,6 +1665,9 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
nskb->data_len = record->len;
nskb->len += record->len;
nskb->truesize += record->len;
+ nskb->sk = skb->sk;
+ nskb->destructor = skb->destructor;
+ refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}
/*
@@ -1659,7 +1739,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
- struct sge_eth_txq *q,
+ struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
struct sk_buff *nskb = NULL;
@@ -1668,30 +1748,37 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
nskb = skb;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
} else {
- dev_kfree_skb_any(skb);
-
- nskb = alloc_skb(0, GFP_KERNEL);
- if (!nskb)
+ nskb = alloc_skb(0, GFP_ATOMIC);
+ if (!nskb) {
+ dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
+ }
+
/* copy complete record in skb */
- chcr_ktls_copy_record_in_skb(nskb, record);
+ chcr_ktls_copy_record_in_skb(nskb, skb, record);
/* packet is being sent from the beginning, update the tcp_seq
* accordingly.
*/
tcp_seq = tls_record_start_seq(record);
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
+ /* reset skb offset */
+ skb_offset = 0;
+
+ if (last_wr)
+ dev_kfree_skb_any(skb);
+
+ last_wr = true;
+
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
}
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ last_wr, record->len, skb_offset,
+ record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
goto out;
}
+ tx_info->prev_seq = record->end_seq;
return 0;
out:
dev_kfree_skb_any(nskb);
@@ -1723,41 +1810,47 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ u32 data_len, u32 skb_offset,
struct sge_eth_txq *q, u32 tls_end_offset)
{
u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
u8 prior_data[16] = {0};
u32 prior_data_len = 0;
- u32 data_len;
/* check if the skb is ending in middle of tag/HASH, its a big
* trouble, send the packet before the HASH.
*/
- int remaining_record = tls_end_offset - skb->data_len;
+ int remaining_record = tls_end_offset - data_len;
if (remaining_record > 0 &&
remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
- int trimmed_len = skb->data_len -
- (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
- struct sk_buff *tmp_skb = NULL;
- /* don't process the pkt if it is only a partial tag */
- if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
- goto out;
+ int trimmed_len = 0;
- WARN_ON(trimmed_len > skb->data_len);
+ if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ trimmed_len = data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
+ remaining_record);
+ if (!trimmed_len)
+ return FALLBACK;
- /* shift to those many bytes */
- tmp_skb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!tmp_skb))
- goto out;
+ WARN_ON(trimmed_len > data_len);
- chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
- /* free the last trimmed portion */
- dev_kfree_skb_any(skb);
- skb = tmp_skb;
+ data_len = trimmed_len;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
}
- data_len = skb->data_len;
+
+ /* check if it is only the header part. */
+ if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id, prior_data,
+ data_len, skb_offset, prior_data_len))
+ goto out;
+
+ tx_info->prev_seq = tcp_seq + data_len;
+ return 0;
+ }
+
/* check if the middle record's start point is 16 byte aligned. CTR
* needs 16 byte aligned start point to start encryption.
*/
@@ -1818,9 +1911,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
- /* include prio_data_len for further calculation.
- */
- data_len += prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
@@ -1829,37 +1919,54 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
- /* Else means, its a partial first part of the record. Check if
- * its only the header, don't need to send for encryption then.
- */
- if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
- if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
- tcp_push_no_fin, q,
- tx_info->port_id,
- prior_data,
- prior_data_len)) {
- goto out;
- }
- return 0;
- }
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
}
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
mss, tls_rec_offset, prior_data,
- prior_data_len)) {
+ prior_data_len, data_len, skb_offset)) {
goto out;
}
+ tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
return 0;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
+static int chcr_ktls_sw_fallback(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ u32 data_len, skb_offset;
+ struct sk_buff *nskb;
+ struct tcphdr *th;
+
+ nskb = tls_encrypt_skb(skb);
+
+ if (!nskb)
+ return 0;
+
+ th = tcp_hdr(nskb);
+ skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ data_len = nskb->len - skb_offset;
+ skb_tx_timestamp(nskb);
+
+ if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
+ goto out;
+
+ tx_info->prev_seq = ntohl(th->seq) + data_len;
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return 0;
+}
/* nic tls TX handler */
static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct ch_ktls_stats_debug *stats;
@@ -1867,20 +1974,17 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
- u32 tls_end_offset, tcp_seq;
struct tls_context *tls_ctx;
- struct sk_buff *local_skb;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
+ skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_data_len = skb->len - skb_offset;
+ data_len = skb_data_len;
- mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
-
- /* check if we haven't set it for ktls offload */
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
if (unlikely(tls_ctx->netdev != dev))
@@ -1892,14 +1996,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!tx_info))
goto out;
- /* don't touch the original skb, make a new skb to extract each records
- * and send them separately.
- */
- local_skb = alloc_skb(0, GFP_KERNEL);
-
- if (unlikely(!local_skb))
- return NETDEV_TX_BUSY;
-
adap = tx_info->adap;
stats = &adap->ch_ktls_stats;
port_stats = &stats->ktls_port[tx_info->port_id];
@@ -1914,20 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (ret)
return NETDEV_TX_BUSY;
}
- /* update tcb */
- ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
- ntohl(th->ack_seq),
- ntohs(th->window));
- if (ret) {
- dev_kfree_skb_any(local_skb);
- return NETDEV_TX_BUSY;
- }
- /* copy skb contents into local skb */
- chcr_ktls_skb_copy(skb, local_skb);
-
- /* go through the skb and send only one record at a time. */
- data_len = skb->data_len;
/* TCP segments can be in received either complete or partial.
* chcr_end_part_handler will handle cases if complete record or end
* part of the record is received. Incase of partial end part of record,
@@ -1952,10 +2035,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* update tcb for the skb */
+ if (skb_data_len == data_len) {
+ u32 tx_max = tcp_seq;
+
+ if (!tls_record_is_start_marker(record) &&
+ tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ tx_max = record->end_seq -
+ TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
+ ntohl(th->ack_seq),
+ ntohs(th->window),
+ tls_end_offset !=
+ record->len);
+ if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock,
+ flags);
+ goto out;
+ }
+
+ if (th->fin)
+ skb_get(skb);
+ }
+
if (unlikely(tls_record_is_start_marker(record))) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
- goto out;
+ /* If tls_end_offset < data_len, means there is some
+ * data after start marker, which needs encryption, send
+ * plaintext first and take skb refcount. else send out
+ * complete pkt as plaintext.
+ */
+ if (tls_end_offset < data_len)
+ skb_get(skb);
+ else
+ tls_end_offset = data_len;
+
+ ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tx_info->port_id, NULL,
+ tls_end_offset, skb_offset,
+ 0);
+
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ if (ret) {
+ /* free the refcount taken earlier */
+ if (tls_end_offset < data_len)
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ data_len -= tls_end_offset;
+ tcp_seq = record->end_seq;
+ skb_offset += tls_end_offset;
+ continue;
}
/* increase page reference count of the record, so that there
@@ -1967,73 +2104,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* lock cleared */
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- tls_end_offset = record->end_seq - tcp_seq;
- pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
- tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
- struct sk_buff *nskb = NULL;
-
- if (tls_end_offset < data_len) {
- nskb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!nskb)) {
- ret = -ENOMEM;
- goto clear_ref;
- }
-
- chcr_ktls_skb_shift(nskb, local_skb,
- tls_end_offset);
- } else {
- /* its the only record in this skb, directly
- * point it.
- */
- nskb = local_skb;
- }
- ret = chcr_end_part_handler(tx_info, nskb, record,
+ ret = chcr_end_part_handler(tx_info, skb, record,
tcp_seq, mss,
(!th->fin && th->psh), q,
+ skb_offset,
tls_end_offset,
- (nskb == local_skb));
-
- if (ret && nskb != local_skb)
- dev_kfree_skb_any(local_skb);
+ skb_offset +
+ tls_end_offset == skb->len);
data_len -= tls_end_offset;
/* tcp_seq increment is required to handle next record.
*/
tcp_seq += tls_end_offset;
+ skb_offset += tls_end_offset;
} else {
- ret = chcr_short_record_handler(tx_info, local_skb,
+ ret = chcr_short_record_handler(tx_info, skb,
record, tcp_seq, mss,
(!th->fin && th->psh),
+ data_len, skb_offset,
q, tls_end_offset);
data_len = 0;
}
-clear_ref:
+
/* clear the frag ref count which increased locally before */
for (i = 0; i < record->num_frags; i++) {
/* clear the frag ref count */
__skb_frag_unref(&record->frags[i]);
}
/* if any failure, come out from the loop. */
- if (ret)
- goto out;
+ if (ret) {
+ if (th->fin)
+ dev_kfree_skb_any(skb);
+
+ if (ret == FALLBACK)
+ return chcr_ktls_sw_fallback(skb, tx_info, q);
+
+ return NETDEV_TX_OK;
+ }
+
/* length should never be less than 0 */
WARN_ON(data_len < 0);
} while (data_len > 0);
- tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
- atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
+ atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
/* tcp finish is set, send a separate tcp msg including all the options
* as well.
*/
- if (th->fin)
+ if (th->fin) {
chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index c1651b1431a0..18b3b1f02415 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -26,6 +26,7 @@
#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
sizeof(struct cpl_tx_sec_pdu))
+#define FALLBACK 35
enum ch_ktls_open_state {
CH_KTLS_OPEN_SUCCESS = 0,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index ec4f79049a06..96d561653496 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -212,7 +212,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
- refcount_add(2, &skb->users);
+ refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
}
@@ -772,14 +772,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
@@ -796,15 +795,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
-
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static void chtls_purge_wr_queue(struct sock *sk)
@@ -1514,7 +1511,6 @@ static void add_to_reap_list(struct sock *sk)
struct chtls_sock *csk = sk->sk_user_data;
local_bh_disable();
- bh_lock_sock(sk);
release_tcp_port(sk); /* release the port immediately */
spin_lock(&reap_list_lock);
@@ -1523,7 +1519,6 @@ static void add_to_reap_list(struct sock *sk)
if (!csk->passive_reap_next)
schedule_work(&reap_task);
spin_unlock(&reap_list_lock);
- bh_unlock_sock(sk);
local_bh_enable();
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index f1820aca0d33..62c829023da5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -383,6 +383,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
if (ret)
goto out_notcb;
+ if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+ goto out_notcb;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 9fb5ca6682ea..188d871f6b8c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1585,6 +1585,7 @@ skip_copy:
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
+ struct sk_buff *next_skb;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
@@ -1595,8 +1596,10 @@ skip_copy:
chtls_free_skb(sk, skb);
buffers_freed++;
hws->copied_seq = 0;
- if (copied >= target &&
- !skb_peek(&sk->sk_receive_queue))
+ next_skb = skb_peek(&sk->sk_receive_queue);
+ if (copied >= target && !next_skb)
+ break;
+ if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
break;
}
} while (len > 0);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 00024dd41147..80fb1f537bb3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1907,6 +1907,8 @@ err_register_netdev:
clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
err_ncsi_dev:
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1926,6 +1928,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
netdev = platform_get_drvdata(pdev);
priv = netdev_priv(netdev);
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);
clk_disable_unprepare(priv->rclk);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 06cc863f4dd6..d9c285948fc2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -174,12 +174,17 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
#ifdef CONFIG_DPAA_ERRATUM_A050385
-#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
- + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
#else
-#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
- dpaa_rx_extra_headroom)
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -2840,7 +2845,8 @@ out_error:
return err;
}
-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
{
u16 headroom;
@@ -2854,10 +2860,12 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
*
* Also make sure the headroom is a multiple of data_align bytes
*/
- headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
- DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
- return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ if (port == RX)
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ else
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
@@ -3025,8 +3033,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
goto free_dpaa_fqs;
}
- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
/* All real interfaces need their ports initialized */
err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
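
The headroom computation in the dpaa hunk above is: per-port private data size plus the fixed hardware annotation area (parse results + timestamp + hash), rounded up to the port's data alignment. A standalone arithmetic sketch with purely illustrative constants (the real sizes come from the driver's buffer layout and the A050385 erratum handling above):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Illustrative sizes only; the driver derives these from its buffer layout. */
#define PARSE_RESULTS_SIZE 32
#define TIME_STAMP_SIZE     8
#define HASH_RESULTS_SIZE   8
#define HWA_SIZE (PARSE_RESULTS_SIZE + TIME_STAMP_SIZE + HASH_RESULTS_SIZE)

static unsigned int headroom(unsigned int priv_data_size, unsigned int align)
{
	return ALIGN_UP(priv_data_size + HWA_SIZE, align);
}

int main(void)
{
	/* e.g. 64 bytes of private data, Tx aligned to 16, Rx aligned to 64 */
	printf("tx headroom: %u\n", headroom(64, 16));
	printf("rx headroom: %u\n", headroom(64, 64));
	return 0;
}
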
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 0fa18b00c49b..d99ea0f4e4a6 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -16,6 +16,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI
+ select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 52be6e315752..fc2075ea57fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -33,7 +33,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
+ enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
+ enetc_unlock_mdio();
+
if (unlikely(!count))
goto drop_packet_err;
@@ -239,7 +242,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb_tx_timestamp(skb);
/* let H/W know BD ring has been updated */
- enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
return count;
@@ -262,12 +265,16 @@ static irqreturn_t enetc_msix(int irq, void *data)
struct enetc_int_vector *v = data;
int i;
+ enetc_lock_mdio();
+
/* disable interrupts */
- enetc_wr_reg(v->rbier, 0);
- enetc_wr_reg(v->ricr1, v->rx_ictt);
+ enetc_wr_reg_hot(v->rbier, 0);
+ enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+ enetc_unlock_mdio();
napi_schedule(&v->napi);
@@ -334,19 +341,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
+
/* enable interrupts */
- enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
- ENETC_TBIER_TXTIE);
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
return work_done;
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
- int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
@@ -386,7 +397,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
i = tx_ring->next_to_clean;
tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_lock_mdio();
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ enetc_unlock_mdio();
do_tstamp = false;
@@ -429,16 +443,20 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_swbd = tx_ring->tx_swbd;
}
+ enetc_lock_mdio();
+
/* BD iteration loop end */
if (is_eof) {
tx_frm_cnt++;
/* re-arm interrupt source */
- enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
- BIT(16 + tx_ring->index));
+ enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
+ BIT(16 + tx_ring->index));
}
if (unlikely(!bds_to_clean))
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+ enetc_unlock_mdio();
}
tx_ring->next_to_clean = i;
@@ -515,8 +533,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track from page reuse */
rx_ring->next_to_use = i;
- /* update ENETC's consumer index */
- enetc_wr_reg(rx_ring->rcir, i);
}
return j;
@@ -534,8 +550,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
u64 tstamp;
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
- lo = enetc_rd(hw, ENETC_SICTR0);
- hi = enetc_rd(hw, ENETC_SICTR1);
+ lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
+ hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
rxbd = enetc_rxbd_ext(rxbd);
tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
if (lo <= tstamp_lo)
@@ -684,23 +700,31 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
u32 bd_status;
u16 size;
+ enetc_lock_mdio();
+
if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
cleaned_cnt -= count;
}
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
- if (!bd_status)
+ if (!bd_status) {
+ enetc_unlock_mdio();
break;
+ }
- enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
size = le16_to_cpu(rxbd->r.buf_len);
skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
- if (!skb)
+ if (!skb) {
+ enetc_unlock_mdio();
break;
+ }
enetc_get_offloads(rx_ring, rxbd, skb);
@@ -712,6 +736,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+ enetc_unlock_mdio();
dev_kfree_skb(skb);
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
@@ -751,6 +776,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_process_skb(rx_ring, skb);
+ enetc_unlock_mdio();
+
napi_gro_receive(napi, skb);
rx_frm_cnt++;
@@ -1225,6 +1252,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+ enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 17cf7c94fdb5..eb6bbf1113c7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -324,14 +324,100 @@ struct enetc_hw {
void __iomem *global;
};
-/* general register accessors */
-#define enetc_rd_reg(reg) ioread32((reg))
-#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to ensure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+ read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+ read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ iowrite32(val, reg);
+}
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+ u32 val;
+
+ enetc_lock_mdio();
+ val = ioread32(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+ enetc_lock_mdio();
+ iowrite32(val, reg);
+ enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+ return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
+
#ifdef ioread64
-#define enetc_rd_reg64(reg) ioread64((reg))
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+ return ioread64(reg);
+}
#else
/* using this to read out stats on 32b systems */
-static inline u64 enetc_rd_reg64(void __iomem *reg)
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
{
u32 low, high, tmp;
@@ -345,12 +431,29 @@ static inline u64 enetc_rd_reg64(void __iomem *reg)
}
#endif
+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+ u64 val;
+
+ enetc_lock_mdio();
+ val = _enetc_rd_reg64(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val))
#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
-#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
+#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off))
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
+#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
+ (hw)->port + (off), val)
/* global register accessors - PF only */
#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
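
The enetc_hw.h hunk above introduces a readers-writers scheme: datapath register accessors take the lock shared (they may run concurrently with one another), while MDIO accessors take it exclusive so they never overlap other ENETC register accesses. A userspace analogy using pthread rwlocks, with invented names, just to show the pairing (the kernel code uses rwlock_t and the irqsave variants shown above):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t mdio_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint32_t fake_regs[16];

static uint32_t datapath_rd(int reg)
{
	uint32_t val;

	pthread_rwlock_rdlock(&mdio_lock);    /* like enetc_lock_mdio() */
	val = fake_regs[reg];                 /* like enetc_rd_reg_hot() */
	pthread_rwlock_unlock(&mdio_lock);    /* like enetc_unlock_mdio() */
	return val;
}

static void mdio_wr(int reg, uint32_t val)
{
	pthread_rwlock_wrlock(&mdio_lock);    /* exclusive, like the _mdio_reg_wa helpers */
	fake_regs[reg] = val;
	pthread_rwlock_unlock(&mdio_lock);
}

int main(void)
{
	mdio_wr(3, 0xabcd);
	printf("reg3 = 0x%x\n", datapath_rd(3));
	return 0;
}
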
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 48c32a171afa..ee0116ed4738 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -16,13 +16,13 @@
static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
{
- return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
}
static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
u32 val)
{
- enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
}
#define enetc_mdio_rd(mdio_priv, off) \
@@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
return hw;
}
EXPORT_SYMBOL_GPL(enetc_hw_alloc);
+
+/* Lock for MDIO access errata on LS1028A */
+DEFINE_RWLOCK(enetc_mdio_lock);
+EXPORT_SYMBOL_GPL(enetc_mdio_lock);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index c81be32bcedf..827f74e86d34 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -402,7 +402,7 @@ struct enetc_psfp_gate {
u32 num_entries;
refcount_t refcount;
struct hlist_node node;
- struct action_gate_entry entries[0];
+ struct action_gate_entry entries[];
};
/* Only enable the green color frame now
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 832a2175636d..c527f4ee1d3a 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -456,6 +456,12 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_FRREG (1 << 16)
+/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
+ * the generation of an MII event. This must be avoided in the older
+ * FEC blocks where it will stop MII events being generated.
+ */
+#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8f7eca1e7716..04f24c66cf36 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,14 +100,14 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -119,7 +119,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -127,7 +128,7 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};
static struct platform_device_id fec_devtype[] = {
@@ -1807,7 +1808,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1866,11 +1867,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- else
- ret = 0;
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -2134,15 +2133,17 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (suppress_preamble)
fep->phy_speed |= BIT(7);
- /* Clear MMFR to avoid to generate MII event by writing MSCR.
- * MII event generation condition:
- * - writing MSCR:
- * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
- * mscr_reg_data_in[7:0] != 0
- * - writing MMFR:
- * - mscr[7:0]_not_zero
- */
- writel(0, fep->hwp + FEC_MII_DATA);
+ if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
+ /* Clear MMFR to avoid generating an MII event when writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+ }
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2272,7 +2273,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 i, off;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2973,7 +2974,7 @@ fec_enet_open(struct net_device *ndev)
int ret;
bool reset_again;
- ret = pm_runtime_get_sync(&fep->pdev->dev);
+ ret = pm_runtime_resume_and_get(&fep->pdev->dev);
if (ret < 0)
return ret;
@@ -3767,7 +3768,7 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
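
The fec_main.c conversions above matter for error handling: pm_runtime_get_sync() raises the device usage count even when the resume fails, so every early-return error path would also need a put, whereas pm_runtime_resume_and_get() drops the count itself on failure. A toy model with plain counters (not the real PM core, names invented) to illustrate the difference:

#include <errno.h>
#include <stdio.h>

struct toy_device { int usage_count; int resume_fails; };

static int toy_get_sync(struct toy_device *dev)
{
	dev->usage_count++;                 /* count stays raised even on failure */
	return dev->resume_fails ? -EIO : 0;
}

static int toy_resume_and_get(struct toy_device *dev)
{
	int ret = toy_get_sync(dev);

	if (ret < 0)
		dev->usage_count--;         /* failure path drops the count itself */
	return ret;
}

int main(void)
{
	struct toy_device dev = { .usage_count = 0, .resume_fails = 1 };

	if (toy_get_sync(&dev) < 0)
		printf("get_sync failed, count now %d (leaked)\n", dev.usage_count);

	dev.usage_count = 0;
	if (toy_resume_and_get(&dev) < 0)
		printf("resume_and_get failed, count now %d\n", dev.usage_count);
	return 0;
}
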
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 41dd3d0f3452..d391a45cebb6 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1829,20 +1829,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, fcb_len);
- if (!skb_new) {
+ if (fcb_len) {
+ if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-
- if (skb->sk)
- skb_set_owner_w(skb_new, skb->sk);
- dev_consume_skb_any(skb);
- skb = skb_new;
}
/* total number of fragments in the SKB */
@@ -3380,7 +3372,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 714b501be7d0..ba8869c3d891 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1358,7 +1358,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
+ if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
upsmr |= UCC_GETH_UPSMR_SGMM;
out_be32(&uf_regs->upsmr, upsmr);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 281de8326bc5..015796a20118 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -198,7 +198,7 @@ static_assert(sizeof(struct stats) == 16);
struct gve_stats_report {
__be64 written_count;
- struct stats stats[0];
+ struct stats stats[];
};
static_assert(sizeof(struct gve_stats_report) == 8);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 48a433154ce0..02e7d74779f4 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -116,9 +116,8 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
priv->tx_cfg.num_queues;
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
priv->rx_cfg.num_queues;
- priv->stats_report_len = sizeof(struct gve_stats_report) +
- (tx_stats_num + rx_stats_num) *
- sizeof(struct stats);
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+ tx_stats_num + rx_stats_num);
priv->stats_report =
dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
&priv->stats_report_bus, GFP_KERNEL);
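
With the stats array turned into a flexible array member, the allocation size in the gve hunk above is the header plus n trailing elements, which is what struct_size(priv->stats_report, stats, n) computes (plus overflow checking). A standalone sketch of the same arithmetic using a made-up struct, not the gve layout:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct stat_entry { uint32_t name; uint32_t queue; uint64_t value; };

struct report {
	uint64_t written_count;
	struct stat_entry stats[];   /* flexible array member, like gve_stats_report */
};

int main(void)
{
	size_t n = 12;
	/* what struct_size(p, stats, n) evaluates to, minus the overflow checks */
	size_t len = sizeof(struct report) + n * sizeof(struct stat_entry);

	printf("report with %zu entries needs %zu bytes\n", n, len);
	return 0;
}
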
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 15f69fa86323..e8495f58a1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1373,7 +1373,7 @@ static int hclge_tm_bp_setup(struct hclge_dev *hdev)
return ret;
}
- return ret;
+ return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 50c84c5e65d2..c8e3fdd5999c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3262,8 +3262,8 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
- hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7ef3369953b6..c3ec9ceed833 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1031,12 +1031,6 @@ static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
ret = -EOPNOTSUPP;
}
- if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
- netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
- netdev->stats.tx_dropped++;
- ret = -EOPNOTSUPP;
- }
-
return ret;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 8148f796a807..da15913879f8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1185,18 +1185,27 @@ static int ibmvnic_open(struct net_device *netdev)
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc)
- return rc;
+ goto out;
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- return rc;
+ goto out;
}
}
rc = __ibmvnic_open(netdev);
+out:
+ /*
+ * If open fails due to a pending failover, set device state and
+ * return. Device operation will be handled by reset routine.
+ */
+ if (rc && adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ rc = 0;
+ }
return rc;
}
@@ -1815,9 +1824,13 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
int rc;
rc = 0;
- ether_addr_copy(adapter->mac_addr, addr->sa_data);
- if (adapter->state != VNIC_PROBED)
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (adapter->state != VNIC_PROBED) {
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+ }
return rc;
}
@@ -1918,6 +1931,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rwi->reset_reason);
rtnl_lock();
+ /*
+ * Now that we have the rtnl lock, clear any pending failover.
+ * This will ensure ibmvnic_open() has either completed or will
+ * block until failover is complete.
+ */
+ if (rwi->reset_reason == VNIC_RESET_FAILOVER)
+ adapter->failover_pending = false;
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
@@ -2198,6 +2218,13 @@ static void __ibmvnic_reset(struct work_struct *work)
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
} else if (adapter->force_reset_recovery) {
+ /*
+ * Since we are doing a hard reset now, clear the
+ * failover_pending flag so we don't ignore any
+ * future MOBILITY or other resets.
+ */
+ adapter->failover_pending = false;
+
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
@@ -2262,9 +2289,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
+ /*
+ * If failover is pending don't schedule any other reset.
+ * Instead let the failover complete. If there is already a
+ * failover reset scheduled, we will detect and drop the
+ * duplicate reset when walking the ->rwi_list below.
+ */
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED ||
- adapter->failover_pending) {
+ (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
ret = EBUSY;
netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
goto err;
@@ -4709,7 +4742,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
- adapter->failover_pending = false;
if (!completion_done(&adapter->init_done)) {
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
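
The ibmvnic comments above describe a simple scheduling rule: while a failover is pending, only a failover reset may be queued, and the pending flag is cleared once the failover (or a hard) reset actually runs. A minimal sketch of just the gating predicate, with invented names, under those assumptions:

#include <stdbool.h>
#include <stdio.h>

enum reset_reason { RESET_FAILOVER, RESET_MOBILITY, RESET_TIMEOUT };

/* Illustrative predicate mirroring the rule in ibmvnic_reset(): while a
 * failover is pending, only a failover reset may be scheduled.
 */
static bool may_schedule(bool failover_pending, enum reset_reason reason)
{
	return !failover_pending || reason == RESET_FAILOVER;
}

int main(void)
{
	printf("failover pending, mobility reset: %s\n",
	       may_schedule(true, RESET_MOBILITY) ? "schedule" : "skip");
	printf("failover pending, failover reset: %s\n",
	       may_schedule(true, RESET_FAILOVER) ? "schedule" : "skip");
	return 0;
}
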
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index c96e2f2d4cba..4919d22d7b6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2713,6 +2713,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ if (is_valid_ether_addr(al->list[i].addr) &&
+ is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr,
+ al->list[i].addr);
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2740,6 +2744,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
+ bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
i40e_status ret = 0;
@@ -2759,6 +2764,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+ was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2779,10 +2786,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
+ if (vf->trusted && was_unimac_deleted) {
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ u8 *macaddr = NULL;
+ int bkt;
+
+ /* set last unicast mac address as default */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (is_valid_ether_addr(f->macaddr))
+ macaddr = f->macaddr;
+ }
+ if (macaddr)
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 6acede0acdca..567fd67e900e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -281,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
struct sk_buff *skb;
- bool failure;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9112dff075cf..b673ac1199bb 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3891,21 +3891,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * igc_get_stats - Get System Network Statistics
+ * igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
* The statistics are updated here and also from the timer callback.
*/
-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+static void igc_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ spin_lock(&adapter->stats64_lock);
if (!test_bit(__IGC_RESETTING, &adapter->state))
igc_update_stats(adapter);
-
- /* only return the current stats */
- return &netdev->stats;
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
}
static netdev_features_t igc_fix_features(struct net_device *netdev,
@@ -4855,7 +4857,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
- .ndo_get_stats = igc_get_stats,
+ .ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 54b0bf574c05..4a9041ee1b39 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2287,6 +2287,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
@@ -2295,8 +2296,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
sinfo->nr_frags++;
-
- rx_desc->buf_phys_addr = 0;
+ } else {
+ page_pool_put_full_page(rxq->page_pool, page, true);
}
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b1fcc44f566a..b6f20e2034c6 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -6,6 +6,7 @@
config PRESTERA
tristate "Marvell Prestera Switch ASICs support"
depends on NET_SWITCHDEV && VLAN_8021Q
+ depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 1b97adae542e..be5677623455 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -676,7 +676,8 @@ static int prestera_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(&pdev->dev, "fail to set DMA mask\n");
goto err_dma_mask;
}
@@ -702,8 +703,10 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
- if (!fw->wq)
+ if (!fw->wq) {
+ err = -ENOMEM;
goto err_wq_alloc;
+ }
INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 13250553263b..a8641a407c06 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -966,6 +966,7 @@ static int mtk_star_enable(struct net_device *ndev)
mtk_star_adjust_link, 0, priv->phy_intf);
if (!priv->phydev) {
netdev_err(ndev, "failed to connect to PHY\n");
+ ret = -ENODEV;
goto err_free_irq;
}
@@ -1053,7 +1054,7 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
err_drop_packet:
dev_kfree_skb(skb);
ndev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
/* Returns the number of bytes sent or a negative number on the first
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6ff9620a137..f6cfec81ccc3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1864,8 +1864,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
-#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
-#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
@@ -1873,7 +1873,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
-#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 650ae08c71de..8f020f26ebf5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -182,8 +182,8 @@ struct mlx4_init_hca_param {
u64 cmpt_base;
u64 mtt_base;
u64 global_caps;
- u16 log_mc_entry_sz;
- u16 log_mc_hash_sz;
+ u8 log_mc_entry_sz;
+ u8 log_mc_hash_sz;
u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
u8 log_num_qps;
u8 log_num_srqs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 3e44e4d820c5..95f2b26a3ee3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -187,7 +187,7 @@ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+ if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;
priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index e36e505d38ad..d29af7b9c695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ struct net_device *route_dev;
+
ether_addr_copy(e->h_dest, ha);
ether_addr_copy(eth->h_dest, ha);
/* Update the encap source mac, in case that we delete
* the flows when encap source mac changed.
*/
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+ route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
+ if (route_dev)
+ ether_addr_copy(eth->h_source, route_dev->dev_addr);
mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 7cce85faa16f..90930e54b6f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct neighbour *n;
struct rtable *rt;
@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
ip_rt_put(rt);
return ret;
}
+ dev_hold(*route_dev);
if (!(*out_ttl))
*out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
if (dev->rtnl_link_ops)
@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
fl4.saddr = tun_key->u.ipv4.src;
ttl = tun_key->ttl;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
- &fl4, &n, &ttl);
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl4, &n, &ttl);
if (err)
return err;
@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -286,18 +296,18 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct dst_entry *dst;
struct neighbour *n;
@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return ret;
}
+ dev_hold(*route_dev);
n = dst_neigh_lookup(dst, &fl6->daddr);
dst_release(dst);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e)
@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
- &fl6, &n, &ttl);
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl6, &n, &ttl);
if (err)
return err;
@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's importent to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -441,7 +461,7 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
}
#endif
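
In the tc_tun.c hunks above, the route lookup now returns the route device with a reference held alongside the neighbour, and a matching _put helper releases both, which is why the encap entry can keep only the ifindex. A userspace sketch of that acquire-both-or-nothing / release-both pattern using plain counters (names and types invented):

#include <stdio.h>
#include <stdbool.h>

struct res { int refcnt; const char *name; };

static void res_hold(struct res *r) { r->refcnt++; }
static void res_put(struct res *r)  { r->refcnt--; }

/* Acquire the "route device" and the "neighbour" together, or neither. */
static bool lookup_get(struct res *route_dev, struct res *neigh, bool neigh_found)
{
	res_hold(route_dev);
	if (!neigh_found) {
		res_put(route_dev);   /* drop the partial reference on failure */
		return false;
	}
	res_hold(neigh);
	return true;
}

static void lookup_put(struct res *route_dev, struct res *neigh)
{
	res_put(neigh);
	res_put(route_dev);
}

int main(void)
{
	struct res dev = { 0, "route_dev" }, n = { 0, "neigh" };

	if (lookup_get(&dev, &n, true))
		lookup_put(&dev, &n);
	printf("%s refcnt %d, %s refcnt %d\n", dev.name, dev.refcnt, n.name, n.refcnt);
	return 0;
}
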
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 4e574ac73019..be3465ba38ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index fb671a457129..8e96260fce1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0e45590662a8..381a9c8c9da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -64,13 +64,13 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;
- /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
+ /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 0);
+ MLX5_SET(copy_action_in, action, dst_offset, 24);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
@@ -488,13 +488,13 @@ static int rx_add_rule(struct mlx5e_priv *priv,
setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
- /* Set 1 bit ipsec marker */
- /* Set 24 bit ipsec_obj_id */
+ /* Set bit[31] ipsec marker */
+ /* Set bit[23-0] ipsec_obj_id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
- MLX5_SET(set_action_in, action, offset, 7);
- MLX5_SET(set_action_in, action, length, 25);
+ MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 11e31a3db2be..a9b45606dbdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -453,7 +453,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
- u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
struct mlx5e_priv *priv;
struct xfrm_offload *xo;
struct xfrm_state *xs;
@@ -481,7 +480,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
- switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+ switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 056dacb612b0..9df9b9a8e09b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,10 @@
#include "en.h"
#include "en/txrx.h"
-#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
-#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
-#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
@@ -78,7 +79,7 @@ static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_st
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
- return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+ return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
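
The ipsec_rxtx.h hunk above packs the Rx metadata into one 32-bit word: bit 31 is the IPsec marker, bits 30-24 the syndrome, bits 23-0 the IPsec object id. A standalone check of that extraction on a sample value, reusing the same shifts and masks (GENMASK is re-defined here only so the snippet compiles in userspace):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Same field layout as the patch: bit 31 marker, bits 30-24 syndrome,
 * bits 23-0 IPsec object id.
 */
#define IPSEC_METADATA_MARKER(m)  (((m) >> 31) & 0x1)
#define IPSEC_METADATA_SYNDROM(m) (((m) >> 24) & GENMASK(6, 0))
#define IPSEC_METADATA_HANDLE(m)  ((m) & GENMASK(23, 0))

int main(void)
{
	unsigned int metadata = 0x82000007;   /* marker set, syndrome 2, obj id 7 */

	printf("marker %u syndrome %u handle %u\n",
	       IPSEC_METADATA_MARKER(metadata),
	       IPSEC_METADATA_SYNDROM(metadata),
	       IPSEC_METADATA_HANDLE(metadata));
	return 0;
}
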
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index ccaccb9fc2f7..6a1d82503ef8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
- spin_lock(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->channel->async_icosq_lock);
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
err = -ENOSPC;
goto err_dma_unmap;
}
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
return 0;
@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
priv_rx->stats->tls_resync_res_ok++;
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
}
@@ -476,19 +476,22 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct tcphdr);
- if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
+ if (unlikely(!sk))
return;
- if (unlikely(!resync_queue_get_psv(sk)))
- return;
+ if (unlikely(sk->sk_state == TCP_TIME_WAIT))
+ goto unref;
- skb->sk = sk;
- skb->destructor = sock_edemux;
+ if (unlikely(!resync_queue_get_psv(sk)))
+ goto unref;
seq = th->seq;
datalen = skb->len - depth;
tls_offload_rx_resync_async_request_start(sk, seq, datalen);
rq->stats->tls_resync_req_start++;
+
+unref:
+ sock_gen_put(sk);
}
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b3f02aac7f26..ebce97921e03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5253,6 +5253,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
+ mlx5_vxlan_reset_to_default(mdev->vxlan);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 9020d1419bcf..8932c387d46a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -186,7 +186,7 @@ struct mlx5e_encap_entry {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
- struct net_device *route_dev;
+ int route_dev_ifindex;
struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 599f5b5ebc97..6628a0197b4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1584,7 +1584,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
- if (rq->xdp_prog)
+ if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
mlx5_cqwq_update_db_record(cqwq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e3a968e9e2a0..ce710f22b1ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4658,6 +4658,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
@@ -4802,6 +4803,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
return 0;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
@@ -5227,8 +5229,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (IS_ERR(tc->ct))
+ if (IS_ERR(tc->ct)) {
+ err = PTR_ERR(tc->ct);
goto err_ct;
+ }
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 3b979008143d..4a2ce241522e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -283,6 +283,9 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
reg_b = be32_to_cpu(cqe->ft_metadata);
+ if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+ return false;
+
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 82b4419af9d4..6dd3ea3cbbed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -144,7 +144,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+/* If the packet is not CHECKSUM_PARTIAL (e.g. an ICMP packet),
+ * the L3 checksum flag still needs to be set for IPsec
+ */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
@@ -154,7 +156,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
sq->stats->csum_partial_inner++;
} else {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
}
@@ -162,11 +163,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
- if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
- return;
- }
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -177,6 +173,9 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
+ } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+
} else
sq->stats->csum_none++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6e6a9a563992..d4ee0a9c03db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1142,6 +1142,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (!vport->qos.enabled)
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
return mlx5_modify_scheduling_element_cmd(esw->dev,
@@ -1408,6 +1412,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
int i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ memset(&vport->qos, 0, sizeof(vport->qos));
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1902,8 +1907,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
err = 0;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
}
mutex_unlock(&esw->state_lock);
return err;
@@ -2223,12 +2226,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
max_guarantee = evport->info.min_rate;
}
- return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ if (max_guarantee)
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ return 0;
}
-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ u32 divider = calculate_vports_min_rate_divider(esw);
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
@@ -2241,9 +2247,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
continue;
vport_min_rate = evport->info.min_rate;
vport_max_rate = evport->info.max_rate;
- bw_share = MLX5_MIN_BW_SHARE;
+ bw_share = 0;
- if (vport_min_rate)
+ if (divider)
bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
divider,
fw_max_bw_share);
@@ -2268,7 +2274,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u32 fw_max_bw_share;
u32 previous_min_rate;
- u32 divider;
bool min_rate_supported;
bool max_rate_supported;
int err = 0;
@@ -2293,8 +2298,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
previous_min_rate = evport->info.min_rate;
evport->info.min_rate = min_rate;
- divider = calculate_vports_min_rate_divider(esw);
- err = normalize_vports_min_rate(esw, divider);
+ err = normalize_vports_min_rate(esw);
if (err) {
evport->info.min_rate = previous_min_rate;
goto unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 16091838bfcf..9fdd99272e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -534,6 +534,13 @@ static void del_sw_hw_rule(struct fs_node *node)
goto out;
}
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
+ --fte->dests_size) {
+ fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ goto out;
+ }
+
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
fte->modify_mask |=
@@ -2010,10 +2017,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->modify_mask && fte->dests_size) {
- modify_fte(fte);
+ if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
up_write_ref_node(&fte->node, false);
- } else {
+ } else if (list_empty(&fte->node.children)) {
del_hw_fte(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index d046db7bb047..3a9fa629503f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
- return devlink_net(priv_to_devlink(dev));
-}
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 3315afe2f8dc..38084400ee8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -168,6 +168,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
{
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+ mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
+ WARN_ON(!hash_empty(vxlan->htable));
+
+ kfree(vxlan);
+}
+
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
+{
struct mlx5_vxlan_port *vxlanp;
struct hlist_node *tmp;
int bkt;
@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
if (!mlx5_vxlan_allowed(vxlan))
return;
- /* Lockless since we are the only hash table consumers*/
hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
- hash_del(&vxlanp->hlist);
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
- kfree(vxlanp);
+ /* Don't delete the default UDP port added by the HW.
+ * Remove only user-configured ports
+ */
+ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
+ continue;
+ mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
}
-
- kfree(vxlan);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index ec766529f49b..34ef662da35e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
#else
static inline struct mlx5_vxlan*
mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
+static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
#endif
#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 872e9910bb7c..a619d90559f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -6,6 +6,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
+ select MLXFW
help
This driver supports Mellanox Technologies Switch ASICs family.
@@ -82,7 +83,6 @@ config MLXSW_SPECTRUM
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
- select MLXFW
imply PTP_1588_CLOCK
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 7f77c2a71d1c..1a86535c4968 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -571,7 +571,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
if (trans->core->fw_flash_in_progress)
timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
- queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
+ timeout << trans->retries);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -620,6 +621,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_transmit(trans->core, trans);
if (err == 0)
return;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
} else {
err = -EIO;
}
@@ -2064,6 +2068,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (!reload)
+ devlink_free(devlink);
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16b47fce540b..b08853f71b2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1174,11 +1174,14 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap_masked;
int err;
ops = mlxsw_sp->port_type_speed_ops;
- /* Set advertised speeds to supported speeds. */
+ /* Set advertised speeds to speeds supported by both the driver
+ * and the device.
+ */
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
@@ -1187,8 +1190,10 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
+ eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_cap, mlxsw_sp_port->link.autoneg);
+ eth_proto_cap_masked,
+ mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3e26eb6cb140..74b3959b36d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -342,6 +342,7 @@ struct mlxsw_sp_port_type_speed_ops {
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
u32 *p_eth_proto_oper);
+ u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
static inline struct net_device *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2096b6478958..540616469e28 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1303,6 +1303,20 @@ mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
p_eth_proto_oper);
}
+static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp1_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
@@ -1313,6 +1327,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
};
static const enum ethtool_link_mode_bit_indices
@@ -1731,6 +1746,20 @@ mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
p_eth_proto_admin, p_eth_proto_oper);
}
+static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp2_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
@@ -1741,4 +1770,5 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a1938842f828..b319c22c211c 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -148,7 +148,8 @@ static void lan743x_intr_software_isr(void *context)
int_sts = lan743x_csr_read(adapter, INT_STS);
if (int_sts & INT_BIT_SW_GP_) {
- lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
+ /* disable the interrupt to prevent repeated re-triggering */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
intr->software_isr_flag = 1;
}
}
@@ -674,14 +675,12 @@ clean_up:
static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 select, u32 addr, u32 length, u32 *buf)
{
- int ret = -EIO;
u32 dp_sel;
int i;
- mutex_lock(&adapter->dp_lock);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
dp_sel |= select;
@@ -693,13 +692,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
}
- ret = 0;
-unlock:
- mutex_unlock(&adapter->dp_lock);
- return ret;
+ return 0;
}
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
@@ -1019,16 +1015,16 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct lan743x_phy *phy = &adapter->phy;
+ struct phy_device *phydev = NULL;
struct device_node *phynode;
- struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
netdev = adapter->netdev;
phynode = of_node_get(adapter->pdev->dev.of_node);
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
if (phynode) {
+ /* try devicetree phy, or fixed link */
of_get_phy_mode(phynode, &adapter->phy_mode);
if (of_phy_is_fixed_link(phynode)) {
@@ -1044,13 +1040,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
lan743x_phy_link_status_change, 0,
adapter->phy_mode);
of_node_put(phynode);
- if (!phydev)
- goto return_error;
- } else {
+ }
+
+ if (!phydev) {
+ /* try internal phy */
phydev = phy_find_first(adapter->mdiobus);
if (!phydev)
goto return_error;
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
ret = phy_connect_direct(netdev, phydev,
lan743x_phy_link_status_change,
adapter->phy_mode);
@@ -1310,13 +1308,13 @@ clean_up_data_descriptor:
goto clear_active;
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
goto clear_skb;
}
if (cleanup) {
lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
} else {
ignore_sync = (buffer_info->flags &
TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
@@ -1626,7 +1624,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors >
lan743x_tx_get_avail_desc(tx)) {
if (required_number_of_descriptors > (tx->ring_size - 1)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
} else {
/* save to overflow buffer */
tx->overflow_skb = skb;
@@ -1659,7 +1657,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
start_frame_length,
do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
@@ -1678,7 +1676,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
* frame assembler clean up was performed inside
* lan743x_tx_frame_add_fragment
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
}
@@ -2733,7 +2731,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
- mutex_init(&adapter->dp_lock);
ret = lan743x_gpio_init(adapter);
if (ret)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index c61a40411317..a536f4a4994d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -712,9 +712,6 @@ struct lan743x_adapter {
struct lan743x_csr csr;
struct lan743x_intr intr;
- /* lock, used to prevent concurrent access to data port */
- struct mutex dp_lock;
-
struct lan743x_gpio gpio;
struct lan743x_ptp ptp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 545c99b15df8..dc5fbc2704f3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -333,7 +333,7 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
.vf_setattr.attr = attr,
- .vf_setattr.vf_index = vf,
+ .vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
@@ -391,7 +391,7 @@ void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
{
union ionic_dev_cmd cmd = {
.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
- .q_identify.lif_type = lif_type,
+ .q_identify.lif_type = cpu_to_le16(lif_type),
.q_identify.type = qtype,
.q_identify.ver = qver,
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c109cd5a0471..6c243b17312c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -29,6 +29,7 @@ struct ionic_dev_bar {
int res_index;
};
+#ifndef __CHECKER__
/* Registers */
static_assert(sizeof(struct ionic_intr) == 32);
@@ -119,6 +120,7 @@ static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+#endif /* __CHECKER__ */
struct ionic_devinfo {
u8 asic_type;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index ed9808fc743b..35c72d4a78b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -126,6 +126,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(ks, supported);
+ if (!idev->port_info) {
+ netdev_err(netdev, "port_info not initialized\n");
+ return -EOPNOTSUPP;
+ }
+
/* The port_info data is found in a DMA space that the NIC keeps
* up-to-date, so there's no need to request the data from the
* NIC, we already have it in our memory space.
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index f492ae406a60..d7bbf336c6f6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -27,9 +27,9 @@ static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
{
union ionic_dev_cmd cmd = {
.fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
- .fw_download.offset = offset,
- .fw_download.addr = addr,
- .fw_download.length = length
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
};
ionic_dev_cmd_go(idev, &cmd);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index d655a7ae3058..a12df3946a07 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1656,7 +1656,6 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
- ionic_rx_flush(&lif->rxqcqs[i]->cq);
ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
@@ -1915,11 +1914,11 @@ static int ionic_get_vf_config(struct net_device *netdev,
ret = -EINVAL;
} else {
ivf->vf = vf;
- ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
ivf->qos = 0;
ivf->spoofchk = ionic->vfs[vf].spoofchk;
ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
ivf->trusted = ionic->vfs[vf].trusted;
ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
}
@@ -2019,7 +2018,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
if (!ret)
- ionic->vfs[vf].vlanid = vlan;
+ ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
up_write(&ionic->vf_op_lock);
@@ -2048,7 +2047,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
if (!ret)
- lif->ionic->vfs[vf].maxrate = tx_max;
+ lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
@@ -2981,14 +2980,14 @@ void ionic_lif_unregister(struct ionic_lif *lif)
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
+ union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
- union ionic_q_identity *q_ident;
struct ionic_dev *idev;
int qtype;
int err;
idev = &lif->ionic->idev;
- q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+ q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
@@ -3011,14 +3010,14 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
ionic_qtype_versions[qtype]);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
if (!err) {
- qti->version = q_ident->version;
- qti->supported = q_ident->supported;
- qti->features = le64_to_cpu(q_ident->features);
- qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
- qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
- qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
- qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
- qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ qti->version = readb(&q_ident->version);
+ qti->supported = readb(&q_ident->supported);
+ qti->features = readq(&q_ident->features);
+ qti->desc_sz = readw(&q_ident->desc_sz);
+ qti->comp_sz = readw(&q_ident->comp_sz);
+ qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
+ qti->max_sg_elems = readw(&q_ident->max_sg_elems);
+ qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
}
mutex_unlock(&ionic->dev_cmd_lock);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index ee0740881af3..d355676f6c16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -311,7 +311,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
iowrite32(0, &regs->doorbell);
memset_io(&regs->cmd, 0, sizeof(regs->cmd));
@@ -333,7 +333,7 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
- opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
+ opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
do {
done = ionic_dev_cmd_done(idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index 3f543512616e..2a725834f792 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -49,7 +49,7 @@ extern const int ionic_num_stats_grps;
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
- __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+ __le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 169ac4f54640..b3d2250c77d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -200,7 +200,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__wsum)le16_to_cpu(comp->csum);
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
stats->csum_complete++;
}
} else {
@@ -253,19 +253,6 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
-void ionic_rx_flush(struct ionic_cq *cq)
-{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_rx_service, NULL, NULL);
-
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_page_info *page_info)
{
@@ -413,22 +400,20 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- struct ionic_rxq_desc *desc;
- unsigned int i;
- u16 idx;
-
- idx = q->tail_idx;
- while (idx != q->head_idx) {
- desc_info = &q->info[idx];
- desc = desc_info->desc;
- desc->addr = 0;
- desc->len = 0;
+ struct ionic_page_info *page_info;
+ unsigned int i, j;
- for (i = 0; i < desc_info->npages; i++)
- ionic_rx_page_free(q, &desc_info->pages[i]);
+ for (i = 0; i < q->num_descs; i++) {
+ desc_info = &q->info[i];
+ for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ page_info = &desc_info->pages[j];
+ if (page_info->page)
+ ionic_rx_page_free(q, page_info);
+ }
+ desc_info->npages = 0;
+ desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- idx = (idx + 1) & (q->num_descs - 1);
}
}
@@ -812,6 +797,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
skb_frag_t *frag;
bool start, done;
bool outer_csum;
+ dma_addr_t addr;
bool has_vlan;
u16 desc_len;
u8 desc_nsge;
@@ -893,11 +879,10 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
if (frag_left > 0) {
len = min(frag_left, left);
frag_left -= len;
- elem->addr =
- cpu_to_le64(ionic_tx_map_frag(q, frag,
- offset, len));
- if (dma_mapping_error(dev, elem->addr))
+ addr = ionic_tx_map_frag(q, frag, offset, len);
+ if (dma_mapping_error(dev, addr))
goto err_out_abort;
+ elem->addr = cpu_to_le64(addr);
elem->len = cpu_to_le16(len);
elem++;
desc_nsge++;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index a5883be0413f..7667b72232b8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,7 +4,6 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
-void ionic_rx_flush(struct ionic_cq *cq);
void ionic_tx_flush(struct ionic_cq *cq);
void ionic_rx_fill(struct ionic_queue *q);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0e4cd8890cff..0a22f8ce9a2c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1647,9 +1647,9 @@ static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
ilog2(rounded_conn_num));
STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->first_free);
+ p_hwfn->p_cxt_mngr->src_t2.first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->last_free);
+ p_hwfn->p_cxt_mngr->src_t2.last_free);
}
/* Timers PF */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8b64495f8745..056e79620a0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -326,9 +326,6 @@ struct qed_cxt_mngr {
/* SRC T2 */
struct qed_src_t2 src_t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
/* total number of SRQ's for this hwfn */
u32 srq_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 512cbef24097..a99861124630 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2754,14 +2754,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
sizeof(*iwarp_info->partial_fpdus),
GFP_KERNEL);
- if (!iwarp_info->partial_fpdus)
+ if (!iwarp_info->partial_fpdus) {
+ rc = -ENOMEM;
goto err;
+ }
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
- if (!iwarp_info->mpa_intermediate_buf)
+ if (!iwarp_info->mpa_intermediate_buf) {
+ rc = -ENOMEM;
goto err;
+ }
/* The mpa_bufs array serves for pending RX packets received on the
* mpa ll2 that don't have place on the tx ring and require later
@@ -2771,8 +2775,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
sizeof(*iwarp_info->mpa_bufs),
GFP_KERNEL);
- if (!iwarp_info->mpa_bufs)
+ if (!iwarp_info->mpa_bufs) {
+ rc = -ENOMEM;
goto err;
+ }
INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b8af59fc1aa4..d2c190732d3e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2231,7 +2231,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
/* Boot either flash image or firmware image from host file system */
if (qlcnic_load_fw_file == 1) {
- if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ err = qlcnic_83xx_load_fw_image_from_host(adapter);
+ if (err)
return err;
} else {
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 29a7bfa2584d..3d7d3ab383f8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -188,6 +188,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
+ if (unlikely(!port)) {
+ atomic_long_inc(&skb->dev->rx_nohandler);
+ kfree_skb(skb);
+ goto done;
+ }
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3b6ddc706e92..85d9c3e30c69 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4080,9 +4080,17 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
{
- return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ return true;
+ default:
+ return false;
+ }
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4154,8 +4162,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !eth_skb_pad(skb);
+ if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+ /* eth_skb_pad would free the skb on error */
+ return !__skb_put_padto(skb, ETH_ZLEN, false);
}
return true;
@@ -4334,18 +4343,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->len < ETH_ZLEN) {
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_34:
- features &= ~NETIF_F_CSUM_MASK;
- break;
- default:
- break;
- }
- }
+ /* work around hw bug on some chip versions */
+ if (skb->len < ETH_ZLEN)
+ features &= ~NETIF_F_CSUM_MASK;
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
@@ -4573,7 +4573,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -4746,7 +4746,7 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+ IRQF_SHARED, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9c4df4ede011..bd30505fbc57 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1744,12 +1744,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ break;
+ case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
- else
+ break;
+ default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index f61cb997a8f6..82b1c7a5a7a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -113,8 +113,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk))
+ if (IS_ERR(dwmac->tx_clk)) {
+ ret = PTR_ERR(dwmac->tx_clk);
goto err_remove_config_dt;
+ }
clk_prepare_enable(dwmac->tx_clk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index b6e5e3e36b63..81ee0a071b4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -625,13 +625,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
@@ -641,6 +634,13 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
res.wol_irq = pci_irq_vector(pdev, 0);
res.irq = pci_irq_vector(pdev, 0);
+ if (plat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index cb87d31a99df..57a53a600aa5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -23,7 +23,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 200000);
}
/* CSR1 enables the transmit DMA to check for new descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 220626a8d499..ba45fe237512 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4757,6 +4757,7 @@ static void stmmac_napi_add(struct net_device *dev)
ch->priv_data = priv;
ch->index = queue;
+ spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
@@ -5246,6 +5247,7 @@ int stmmac_resume(struct device *dev)
return ret;
}
+ rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -5261,6 +5263,7 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
mutex_unlock(&priv->lock);
+ rtnl_unlock();
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
rtnl_lock();
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 75056c14b161..5dc60ecabe56 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -1001,8 +1001,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
dev_err(dev, "Failed to register ptp clk %ld\n",
PTR_ERR(cpts->ptp_clock));
- if (!cpts->ptp_clock)
- ret = -ENODEV;
+ ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
goto refclk_disable;
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9fd1f77190ad..b0f00b4edd94 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -838,9 +838,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->cpts))
- dev_err(priv->dev, "error registering cpts device\n");
-
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
}
cpsw_restore(priv);
@@ -1631,6 +1634,7 @@ static int cpsw_probe(struct platform_device *pdev)
CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(dev, "error allocating net_device\n");
+ ret = -ENOMEM;
goto clean_cpts;
}
@@ -1716,7 +1720,6 @@ static int cpsw_probe(struct platform_device *pdev)
/* Enable misc CPTS evnt_pend IRQ */
cpts_set_irqpoll(cpsw->cpts, false);
- writel(0x10, &cpsw->wr_regs->misc_en);
skip_cpts:
cpsw_notice(priv, probe,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 4d02c5135611..4619c3a950b0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -728,7 +728,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index f779d2e1b5c5..2f5e0ad23ad7 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -873,8 +873,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->cpts))
- dev_err(priv->dev, "error registering cpts device\n");
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
napi_enable(&cpsw->napi_rx);
napi_enable(&cpsw->napi_tx);
@@ -2006,7 +2010,6 @@ static int cpsw_probe(struct platform_device *pdev)
/* Enable misc CPTS evnt_pend IRQ */
cpts_set_irqpoll(cpsw->cpts, false);
- writel(0x10, &cpsw->wr_regs->misc_en);
skip_cpts:
ret = cpsw_register_notifiers(cpsw);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 51cc29f39038..31c5e36ff706 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -639,13 +639,10 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
+ return -ERANGE;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: