Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 51
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 132
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 52
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 51
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 26
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_regs.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2
36 files changed, 261 insertions(+), 273 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4647d7656761..cada6e7e30f4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4336,13 +4336,15 @@ err_disable_device:
/*****************************************************************************/
-/* ena_remove - Device Removal Routine
+/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
* @pdev: PCI device information struct
+ * @shutdown: Is it a shutdown operation? If false, it is a removal
*
- * ena_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
+ * __ena_shutoff is a helper routine that does the real work on shutdown and
+ * removal paths; the difference between those paths is whether to
+ * detach or unregister the netdevice.
*/
-static void ena_remove(struct pci_dev *pdev)
+static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
struct ena_adapter *adapter = pci_get_drvdata(pdev);
struct ena_com_dev *ena_dev;
@@ -4361,13 +4363,17 @@ static void ena_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
- rtnl_lock();
+ rtnl_lock(); /* lock released inside the below if-else block */
ena_destroy_device(adapter, true);
- rtnl_unlock();
-
- unregister_netdev(netdev);
-
- free_netdev(netdev);
+ if (shutdown) {
+ netif_device_detach(netdev);
+ dev_close(netdev);
+ rtnl_unlock();
+ } else {
+ rtnl_unlock();
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ }
ena_com_rss_destroy(ena_dev);
@@ -4382,6 +4388,30 @@ static void ena_remove(struct pci_dev *pdev)
vfree(ena_dev);
}
+/* ena_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+
+static void ena_remove(struct pci_dev *pdev)
+{
+ __ena_shutoff(pdev, false);
+}
+
+/* ena_shutdown - Device Shutdown Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_shutdown is called by the PCI subsystem to alert the driver that
+ * a shutdown/reboot (or kexec) is happening and the device must be disabled.
+ */
+
+static void ena_shutdown(struct pci_dev *pdev)
+{
+ __ena_shutoff(pdev, true);
+}
+
#ifdef CONFIG_PM
/* ena_suspend - PM suspend callback
* @pdev: PCI device information struct
@@ -4431,6 +4461,7 @@ static struct pci_driver ena_pci_driver = {
.id_table = ena_pci_tbl,
.probe = ena_probe,
.remove = ena_remove,
+ .shutdown = ena_shutdown,
#ifdef CONFIG_PM
.suspend = ena_suspend,
.resume = ena_resume,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c5c8effc0139..d28b406a26b1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6880,12 +6880,12 @@ skip_rdma:
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
- if (rc)
+ if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
- else
- ctx->flags |= BNXT_CTX_FLAG_INITED;
-
+ return rc;
+ }
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
return 0;
}
@@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
pri2cos = &resp2->pri0_cos_queue_id;
for (i = 0; i < 8; i++) {
u8 queue_id = pri2cos[i];
+ u8 queue_idx;
+ /* Per port queue IDs start from 0, 10, 20, etc */
+ queue_idx = queue_id % 10;
+ if (queue_idx > BNXT_MAX_QUEUE) {
+ bp->pri2cos_valid = false;
+ goto qstats_done;
+ }
for (j = 0; j < bp->max_q; j++) {
if (bp->q_ids[j] == queue_id)
- bp->pri2cos[i] = j;
+ bp->pri2cos_idx[i] = queue_idx;
}
}
bp->pri2cos_valid = 1;
}
+qstats_done:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -11669,6 +11677,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->rx_nr_rings++;
bp->cp_nr_rings++;
}
+ if (rc) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
return rc;
}
@@ -11962,12 +11974,12 @@ init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
init_err_free:
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index cabef0b4f5fb..63b170658532 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1716,7 +1716,7 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
- u8 pri2cos[8];
+ u8 pri2cos_idx[8];
u8 pri2cos_valid;
u16 hwrm_max_req_len;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fb6f30d0d1d0..b1511bcffb1b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
struct bnxt *bp = netdev_priv(dev);
struct ieee_ets *my_ets = bp->ieee_ets;
+ int rc;
ets->ets_cap = bp->max_tc;
if (!my_ets) {
- int rc;
-
if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
return 0;
my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
if (!my_ets)
- return 0;
+ return -ENOMEM;
rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
if (rc)
- return 0;
+ goto error;
rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
if (rc)
- return 0;
+ goto error;
+
+ /* cache result */
+ bp->ieee_ets = my_ets;
}
ets->cbs = my_ets->cbs;
@@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
return 0;
+error:
+ kfree(my_ets);
+ return rc;
}
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1f67e6729a2c..3f8a1ded662a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -589,25 +589,25 @@ skip_ring_stats:
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos[i];
+ bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ce64b4e47feb..1d678bee2cc9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -94,12 +94,6 @@ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}
-static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
- void __iomem *d)
-{
- return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
-}
-
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
void __iomem *d,
dma_addr_t addr)
@@ -508,61 +502,6 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
-static void bcmgenet_set_rx_csum(struct net_device *dev,
- netdev_features_t wanted)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 rbuf_chk_ctrl;
- bool rx_csum_en;
-
- rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
-
- rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
-
- /* enable rx checksumming */
- if (rx_csum_en)
- rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
- else
- rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
- priv->desc_rxchk_en = rx_csum_en;
-
- /* If UniMAC forwards CRC, we need to skip over it to get
- * a valid CHK bit to be set in the per-packet status word
- */
- if (rx_csum_en && priv->crc_fwd_en)
- rbuf_chk_ctrl |= RBUF_SKIP_FCS;
- else
- rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
-
- bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-}
-
-static void bcmgenet_set_tx_csum(struct net_device *dev,
- netdev_features_t wanted)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- bool desc_64b_en;
- u32 tbuf_ctrl, rbuf_ctrl;
-
- tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
- rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-
- desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);
-
- /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
- if (desc_64b_en) {
- tbuf_ctrl |= RBUF_64B_EN;
- rbuf_ctrl |= RBUF_64B_EN;
- } else {
- tbuf_ctrl &= ~RBUF_64B_EN;
- rbuf_ctrl &= ~RBUF_64B_EN;
- }
- priv->desc_64b_en = desc_64b_en;
-
- bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
- bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-}
-
static int bcmgenet_set_features(struct net_device *dev,
netdev_features_t features)
{
@@ -578,9 +517,6 @@ static int bcmgenet_set_features(struct net_device *dev,
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
- bcmgenet_set_tx_csum(dev, features);
- bcmgenet_set_rx_csum(dev, features);
-
clk_disable_unprepare(priv->clk);
return ret;
@@ -1475,8 +1411,8 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
-static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
- struct sk_buff *skb)
+static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
+ struct sk_buff *skb)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
@@ -1590,13 +1526,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
*/
GENET_CB(skb)->bytes_sent = skb->len;
- /* set the SKB transmit checksum */
- if (priv->desc_64b_en) {
- skb = bcmgenet_put_tx_csum(dev, skb);
- if (!skb) {
- ret = NETDEV_TX_OK;
- goto out;
- }
+ /* add the Transmit Status Block */
+ skb = bcmgenet_add_tsb(dev, skb);
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
}
for (i = 0; i <= nr_frags; i++) {
@@ -1775,6 +1709,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
+ struct status_64 *status;
+ __be16 rx_csum;
+
cb = &priv->rx_cbs[ring->read_ptr];
skb = bcmgenet_rx_refill(priv, cb);
@@ -1783,20 +1720,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
goto next;
}
- if (!priv->desc_64b_en) {
- dma_length_status =
- dmadesc_get_length_status(priv, cb->bd_addr);
- } else {
- struct status_64 *status;
- __be16 rx_csum;
-
- status = (struct status_64 *)skb->data;
- dma_length_status = status->length_status;
+ status = (struct status_64 *)skb->data;
+ dma_length_status = status->length_status;
+ if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
- if (priv->desc_rxchk_en) {
- skb->csum = (__force __wsum)ntohs(rx_csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
/* DMA flags and length are still valid no matter how
@@ -1840,14 +1769,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
} /* error packet */
skb_put(skb, len);
- if (priv->desc_64b_en) {
- skb_pull(skb, 64);
- len -= 64;
- }
- /* remove hardware 2bytes added for IP alignment */
- skb_pull(skb, 2);
- len -= 2;
+ /* remove RSB and hardware 2 bytes added for IP alignment */
+ skb_pull(skb, 66);
+ len -= 66;
if (priv->crc_fwd_en) {
skb_trim(skb, len - ETH_FCS_LEN);
@@ -2038,11 +1963,28 @@ static void init_umac(struct bcmgenet_priv *priv)
bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
- /* init rx registers, enable ip header optimization */
+ /* init tx registers, enable TSB */
+ reg = bcmgenet_tbuf_ctrl_get(priv);
+ reg |= TBUF_64B_EN;
+ bcmgenet_tbuf_ctrl_set(priv, reg);
+
+ /* init rx registers, enable ip header optimization and RSB */
reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
- reg |= RBUF_ALIGN_2B;
+ reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+ /* enable rx checksumming */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+ reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
+ /* If UniMAC forwards CRC, we need to skip over it to get
+ * a valid CHK bit to be set in the per-packet status word
+ */
+ if (priv->crc_fwd_en)
+ reg |= RBUF_SKIP_FCS;
+ else
+ reg &= ~RBUF_SKIP_FCS;
+ bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
+
if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 61a6fe9f4cec..daf8fb2c39b6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -273,6 +273,7 @@ struct bcmgenet_mib_counters {
#define RBUF_FLTR_LEN_SHIFT 8
#define TBUF_CTRL 0x00
+#define TBUF_64B_EN (1 << 0)
#define TBUF_BP_MC 0x0C
#define TBUF_ENERGY_CTRL 0x14
#define TBUF_EEE_EN (1 << 0)
@@ -662,8 +663,6 @@ struct bcmgenet_priv {
unsigned int irq0_stat;
/* HW descriptors/checksum variables */
- bool desc_64b_en;
- bool desc_rxchk_en;
bool crc_fwd_en;
u32 dma_max_burst_length;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 58a039c3224a..af1f40cbccc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
+ c.u.ts.sign = (delta < 0) ? 1 : 0;
+ if (delta < 0)
+ delta = -delta;
c.u.ts.tm = cpu_to_be64(delta);
err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 97cda501e7e8..cab3d17e0e1a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
int maxreclaim)
{
+ unsigned int reclaimed, hw_cidx;
struct sge_txq *q = &eq->q;
- unsigned int reclaimed;
+ int hw_in_use;
if (!q->in_use || !__netif_tx_trylock(eq->txq))
return 0;
@@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
/* Reclaim pending completed TX Descriptors. */
reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
+ hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+ hw_in_use = q->pidx - hw_cidx;
+ if (hw_in_use < 0)
+ hw_in_use += q->size;
+
/* If the TX Queue is currently stopped and there's now more than half
* the queue available, restart it. Otherwise bail out since the rest
* of what we want do here is with the possibility of shipping any
* currently buffered Coalesced TX Work Request.
*/
- if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
+ if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
netif_tx_wake_queue(eq->txq);
eq->q.restarts++;
}
@@ -1486,16 +1492,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);
-
- /* If we're using the SGE Doorbell Queue Timer facility, we
- * don't need to ask the Firmware to send us Egress Queue CIDX
- * Updates: the Hardware will do this automatically. And
- * since we send the Ingress Queue CIDX Updates to the
- * corresponding Ethernet Response Queue, we'll get them very
- * quickly.
- */
- if (!q->dbqt)
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1805,16 +1802,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
-
- /* If we're using the SGE Doorbell Queue Timer facility, we
- * don't need to ask the Firmware to send us Egress Queue CIDX
- * Updates: the Hardware will do this automatically. And
- * since we send the Ingress Queue CIDX Updates to the
- * corresponding Ethernet Response Queue, we'll get them very
- * quickly.
- */
- if (!txq->dbqt)
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -3370,26 +3358,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}
txq = &s->ethtxq[pi->first_qset + rspq->idx];
-
- /* We've got the Hardware Consumer Index Update in the Egress Update
- * message. If we're using the SGE Doorbell Queue Timer mechanism,
- * these Egress Update messages will be our sole CIDX Updates we get
- * since we don't want to chew up PCIe bandwidth for both Ingress
- * Messages and Status Page writes. However, The code which manages
- * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
- * stored in the Status Page at the end of the TX Queue. It's easiest
- * to simply copy the CIDX Update value from the Egress Update message
- * to the Status Page. Also note that no Endian issues need to be
- * considered here since both are Big Endian and we're just copying
- * bytes consistently ...
- */
- if (txq->dbqt) {
- struct cpl_sge_egr_update *egr;
-
- egr = (struct cpl_sge_egr_update *)rsp;
- WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
- }
-
t4_sge_eth_txq_egress_update(adapter, txq, -1);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index eb53c15b13f3..5f2d57d1b2d3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
spin_unlock_bh(&cmdq->cmdq_lock);
- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
+ if (!wait_for_completion_timeout(&done,
+ msecs_to_jiffies(CMDQ_TIMEOUT))) {
spin_lock_bh(&cmdq->cmdq_lock);
if (cmdq->errcode[curr_prod_idx] == &errcode)
@@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
return -EBUSY;
+ dma_rmb();
+
errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
cmdq_sync_cmd_handler(cmdq, ci, errcode);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 79b3d53f2fbf..c7c75b772a86 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
return -EFAULT;
}
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_io_status cmd_io_status;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- unsigned long end;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
- do {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_STATUS_GET,
- &cmd_io_status, sizeof(cmd_io_status),
- &cmd_io_status, &out_size,
- HINIC_MGMT_MSG_SYNC);
- if ((err) || (out_size != sizeof(cmd_io_status))) {
- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
- err);
- return err;
- }
-
- if (cmd_io_status.status == IO_STOPPED) {
- dev_info(&pdev->dev, "IO stopped\n");
- return 0;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
- return -ETIMEDOUT;
-}
-
/**
* clear_io_resource - set the IO resources as not active in the NIC
* @hwdev: the NIC HW device
@@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
return -EINVAL;
}
- err = wait_for_io_stopped(hwdev);
- if (err) {
- dev_err(&pdev->dev, "IO has not stopped yet\n");
- return err;
- }
+ /* sleep 100ms to wait for the firmware to stop I/O */
+ msleep(100);
cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 79243b626ddb..c0b6bcb067cd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
* eq_update_ci - update the HW cons idx of event queue
* @eq: the event queue to update the cons idx for
**/
-static void eq_update_ci(struct hinic_eq *eq)
+static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
@@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq)
val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
- HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
+ HINIC_EQ_CI_SET(arm_state, INT_ARMED);
val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
@@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq)
if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
break;
+ dma_rmb();
+
event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
if (event >= HINIC_MAX_AEQ_EVENTS) {
dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
@@ -347,7 +349,7 @@ static void eq_irq_handler(void *data)
else if (eq->type == HINIC_CEQ)
ceq_irq_handler(eq);
- eq_update_ci(eq);
+ eq_update_ci(eq, EQ_ARMED);
}
/**
@@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
}
set_eq_ctrls(eq);
- eq_update_ci(eq);
+ eq_update_ci(eq, EQ_ARMED);
err = alloc_eq_pages(eq);
if (err) {
@@ -752,18 +754,28 @@ err_req_irq:
**/
static void remove_eq(struct hinic_eq *eq)
{
- struct msix_entry *entry = &eq->msix_entry;
-
- free_irq(entry->vector, eq);
+ hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
+ HINIC_MSIX_DISABLE);
+ free_irq(eq->msix_entry.vector, eq);
if (eq->type == HINIC_AEQ) {
struct hinic_eq_work *aeq_work = &eq->aeq_work;
cancel_work_sync(&aeq_work->work);
+ /* clear aeq_len to prevent hw from accessing host memory */
+ hinic_hwif_write_reg(eq->hwif,
+ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
} else if (eq->type == HINIC_CEQ) {
tasklet_kill(&eq->ceq_tasklet);
+ /* clear ceq_len to prevent hw from accessing host memory */
+ hinic_hwif_write_reg(eq->hwif,
+ HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
}
+ /* update cons_idx to avoid invalid interrupt */
+ eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
+ eq_update_ci(eq, EQ_NOT_ARMED);
+
free_eq_pages(eq);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index c1a6be6bf6a8..8995e32dd1c0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -43,7 +43,7 @@
#define MSG_NOT_RESP 0xFFFF
-#define MGMT_MSG_TIMEOUT 1000
+#define MGMT_MSG_TIMEOUT 5000
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
+ if (!wait_for_completion_timeout(recv_done,
+ msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 2695ad69fca6..815649e37cb1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
if (!rq_wqe)
break;
+ /* make sure we read rx_done before packet length */
+ dma_rmb();
+
cqe = rq->cqe[ci];
status = be32_to_cpu(cqe->status);
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 0e13d1c7e474..365016450bdb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -45,7 +45,7 @@
#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
-#define MIN_SKB_LEN 17
+#define MIN_SKB_LEN 32
#define MAX_PAYLOAD_OFFSET 221
#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
@@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
do {
hw_ci = HW_CONS_IDX(sq) & wq->mask;
+ dma_rmb();
+
/* Reading a WQEBB to get real WQE size and consumer index. */
sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
if ((!sq_wqe) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 220ef9f06f84..c9606b8ab6ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -371,6 +371,7 @@ enum {
struct mlx5e_sq_wqe_info {
u8 opcode;
+ u8 num_wqebbs;
/* Auxiliary data for different opcodes. */
union {
@@ -1059,6 +1060,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index d3693fa547ac..e54f70d9af22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -10,8 +10,7 @@
static inline bool cqe_syndrome_needs_recover(u8 syndrome)
{
- return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
- syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+ return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 6c72b592315b..a01e2de2488f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_reset_icosq_cc_pc(icosq);
- mlx5e_free_rx_descs(rq);
+ mlx5e_free_rx_in_progress_descs(rq);
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
mlx5e_activate_icosq(icosq);
mlx5e_activate_rq(rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index a226277b0980..f07b1399744e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
mlx5_wq_ll_reset(&rq->mpwqe.wq);
- else
+ rq->mpwqe.actual_wq_head = 0;
+ } else {
mlx5_wq_cyc_reset(&rq->wqe.wq);
+ }
}
/* SW parser related functions */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index a3efa29a4629..63116be6b1d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -38,8 +38,8 @@ enum {
enum {
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0,
- MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
- MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2,
+ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1,
+ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
};
struct mlx5e_ktls_offload_context_tx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f260dd96873b..52a56622034a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
* this packet was already acknowledged and its record info
* was released.
*/
- ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+ ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));
if (unlikely(tls_record_is_start_marker(record))) {
ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 21de4764d4c0..4ef3dc79f73c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -813,6 +813,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
return -ETIMEDOUT;
}
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq;
+ u16 head;
+ int i;
+
+ if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return;
+
+ wq = &rq->mpwqe.wq;
+ head = wq->head;
+
+ /* Outstanding UMR WQEs (in progress) start at wq->head */
+ for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+ rq->dealloc_wqe(rq, head);
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+ }
+
+ rq->mpwqe.actual_wq_head = wq->head;
+ rq->mpwqe.umr_in_progress = 0;
+ rq->mpwqe.umr_completed = 0;
+}
+
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
__be16 wqe_ix_be;
@@ -820,14 +843,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
- u16 head = wq->head;
- int i;
- /* Outstanding UMR WQEs (in progress) start at wq->head */
- for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
- rq->dealloc_wqe(rq, head);
- head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
- }
+ mlx5e_free_rx_in_progress_descs(rq);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1c3ab69cbd96..312d4692425b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
for (; wi < edge_wi; wi++) {
wi->opcode = MLX5_OPCODE_NOP;
+ wi->num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
}
@@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
sq->db.ico_wqe[pi].umr.rq = rq;
sq->pc += MLX5E_UMR_WQEBBS;
@@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.ico_wqe[ci];
+ sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
@@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
}
- if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
- sqcc += MLX5E_UMR_WQEBBS;
+ if (likely(wi->opcode == MLX5_OPCODE_UMR))
wi->umr.rq->mpwqe.umr_completed++;
- } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
- sqcc++;
- } else {
+ else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OPCODE in ICOSQ WQE info: 0x%x\n",
wi->opcode);
- }
} while (!last_wqe);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 74091f72c9a8..ec5fc52bf572 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2476,10 +2476,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
continue;
if (f->field_bsize == 32) {
- mask_be32 = *(__be32 *)&mask;
+ mask_be32 = (__be32)mask;
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (f->field_bsize == 16) {
- mask_be16 = *(__be16 *)&mask;
+ mask_be32 = (__be32)mask;
+ mask_be16 = *(__be16 *)&mask_be32;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 257a7c9f7a14..800d34ed8a96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 8e19f6ab8393..93052b07c76c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -615,8 +615,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
break;
if (i == MLX5_MAX_PORTS) {
- if (ldev->nb.notifier_call)
+ if (ldev->nb.notifier_call) {
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
+ ldev->nb.notifier_call = NULL;
+ }
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6dec2a550a10..2d93228ff633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
action->rewrite.data = (void *)ops;
action->rewrite.num_of_actions = i;
- action->rewrite.chunk->byte_size = i * sizeof(*ops);
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index c7f10d4f8f8d..095ec7b1399d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
int ret;
send_info.write.addr = (uintptr_t)action->rewrite.data;
- send_info.write.length = action->rewrite.chunk->byte_size;
+ send_info.write.length = action->rewrite.num_of_actions *
+ DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
send_info.remote_addr = action->rewrite.chunk->mr_addr;
send_info.rkey = action->rewrite.chunk->rkey;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 1faac31f74d0..23f879da9104 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+ MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+ MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
+ req->cap_mask1_perm);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
kfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 914c33e46fb4..e9ded1a6e131 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
mbox->mapaddr);
}
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
- const struct pci_device_id *id)
+static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id,
+ u32 *p_sys_status)
{
unsigned long end;
- char mrsr_pl[MLXSW_REG_MRSR_LEN];
- int err;
+ u32 val;
- mlxsw_reg_mrsr_pack(mrsr_pl);
- err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
- if (err)
- return err;
if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
return 0;
}
- /* We must wait for the HW to become responsive once again. */
+ /* We must wait for the HW to become responsive. */
msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
- u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
-
+ val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
return 0;
cond_resched();
} while (time_before(jiffies, end));
+
+ *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
+
return -EBUSY;
}
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ u32 sys_status;
+ int err;
+
+ err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
+ sys_status);
+ return err;
+ }
+
+ mlxsw_reg_mrsr_pack(mrsr_pl);
+ err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ if (err)
+ return err;
+
+ err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
+ sys_status);
+ return err;
+ }
+
+ return 0;
+}
+
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 54275624718b..336e5ecc68f8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
return 0;
err_erif_unresolve:
- list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
- vif_node)
+ list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
+ vif_node)
mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
- list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
- vif_node)
+ list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
+ vif_node)
mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
mr_vif->rif = NULL;
return err;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 54547d53b0f2..51adf5059834 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */
#ifndef _IONIC_IF_H_
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_regs.h b/drivers/net/ethernet/pensando/ionic/ionic_regs.h
index 03ee5a36472b..2e174f45c030 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_regs.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
/* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. */
#ifndef IONIC_REGS_H
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 07f9067affc6..cda5b0a9e948 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
ahw->reset.seq_error = 0;
ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
- if (p_dev->ahw->reset.buff == NULL)
+ if (ahw->reset.buff == NULL)
return -ENOMEM;
p_buff = p_dev->ahw->reset.buff;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a2168a14794c..a9bdafd15a35 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5194,7 +5194,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
/* fall through */
- case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
flags = PCI_IRQ_LEGACY;
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index dc50ba13a746..2d5573b3dee1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
ret = rk_gmac_clk_init(plat_dat);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = rk_gmac_powerup(plat_dat->bsp_priv);
if (ret)